From 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 15 Dec 2015 14:52:16 -0300
Subject: Linux-libre 4.3.2-gnu

---
 drivers/Kconfig | 4 +
 drivers/Makefile | 4 +-
 drivers/acpi/Kconfig | 20 +-
 drivers/acpi/Makefile | 8 +-
 drivers/acpi/ac.c | 4 -
 drivers/acpi/acpi_apd.c | 1 -
 drivers/acpi/acpi_ipmi.c | 4 -
 drivers/acpi/acpi_lpss.c | 39 +-
 drivers/acpi/acpi_memhotplug.c | 5 -
 drivers/acpi/acpi_pad.c | 4 -
 drivers/acpi/acpi_pnp.c | 2 -
 drivers/acpi/acpi_processor.c | 2 +-
 drivers/acpi/acpi_video.c | 4 -
 drivers/acpi/acpica/Makefile | 2 +
 drivers/acpi/acpica/acdebug.h | 26 +-
 drivers/acpi/acpica/acdispat.h | 8 +
 drivers/acpi/acpica/acglobal.h | 21 +-
 drivers/acpi/acpica/acinterp.h | 22 +
 drivers/acpi/acpica/aclocal.h | 28 +-
 drivers/acpi/acpica/acmacros.h | 9 +
 drivers/acpi/acpica/acnamesp.h | 13 +-
 drivers/acpi/acpica/acobject.h | 1 +
 drivers/acpi/acpica/acparser.h | 4 +-
 drivers/acpi/acpica/acstruct.h | 2 +-
 drivers/acpi/acpica/actables.h | 18 +-
 drivers/acpi/acpica/acutils.h | 25 +-
 drivers/acpi/acpica/dsargs.c | 4 +-
 drivers/acpi/acpica/dscontrol.c | 2 +-
 drivers/acpi/acpica/dsdebug.c | 231 +
 drivers/acpi/acpica/dsinit.c | 20 +-
 drivers/acpi/acpica/dsmethod.c | 35 +-
 drivers/acpi/acpica/dsopcode.c | 31 +-
 drivers/acpi/acpica/dswload.c | 2 +-
 drivers/acpi/acpica/dswload2.c | 2 +-
 drivers/acpi/acpica/evregion.c | 22 +-
 drivers/acpi/acpica/evxfevnt.c | 2 +-
 drivers/acpi/acpica/exconfig.c | 8 -
 drivers/acpi/acpica/excreate.c | 1 +
 drivers/acpi/acpica/exdebug.c | 324 +
 drivers/acpi/acpica/exdump.c | 5 +-
 drivers/acpi/acpica/exresnte.c | 2 +-
 drivers/acpi/acpica/exresolv.c | 16 +-
 drivers/acpi/acpica/hwxfsleep.c | 15 +-
 drivers/acpi/acpica/nseval.c | 4 +-
 drivers/acpi/acpica/nsload.c | 16 +-
 drivers/acpi/acpica/nsnames.c | 275 +-
 drivers/acpi/acpica/nsparse.c | 42 +-
 drivers/acpi/acpica/nsutils.c | 19 +-
 drivers/acpi/acpica/nsxfname.c | 8 +-
 drivers/acpi/acpica/psargs.c | 26 +-
 drivers/acpi/acpica/psloop.c | 32 +-
 drivers/acpi/acpica/psobject.c | 17 +-
 drivers/acpi/acpica/psparse.c | 14 +-
 drivers/acpi/acpica/psutils.c | 8 +-
 drivers/acpi/acpica/psxface.c | 123 +-
 drivers/acpi/acpica/rscreate.c | 3 +-
 drivers/acpi/acpica/tbfadt.c | 16 +-
 drivers/acpi/acpica/tbfind.c | 15 +-
 drivers/acpi/acpica/tbinstal.c | 40 +-
 drivers/acpi/acpica/tbutils.c | 99 +-
 drivers/acpi/acpica/tbxfload.c | 93 +-
 drivers/acpi/acpica/utdebug.c | 31 +-
 drivers/acpi/acpica/utdelete.c | 3 +
 drivers/acpi/acpica/utfileio.c | 2 +-
 drivers/acpi/acpica/utinit.c | 3 +-
 drivers/acpi/acpica/utmisc.c | 4 +-
 drivers/acpi/acpica/utnonansi.c | 380 +
 drivers/acpi/acpica/utstring.c | 342 -
 drivers/acpi/acpica/utxface.c | 12 +-
 drivers/acpi/acpica/utxfinit.c | 11 -
 drivers/acpi/apei/apei-base.c | 4 -
 drivers/acpi/apei/einj.c | 4 -
 drivers/acpi/apei/erst-dbg.c | 4 -
 drivers/acpi/apei/erst.c | 4 -
 drivers/acpi/apei/ghes.c | 4 -
 drivers/acpi/apei/hest.c | 4 -
 drivers/acpi/battery.c | 4 -
 drivers/acpi/blacklist.c | 4 -
 drivers/acpi/bus.c | 420 +-
 drivers/acpi/button.c | 4 -
 drivers/acpi/cm_sbs.c | 4 -
 drivers/acpi/container.c | 4 -
 drivers/acpi/debugfs.c | 2 +
 drivers/acpi/device_pm.c | 12 +-
 drivers/acpi/device_sysfs.c | 521 +
 drivers/acpi/dock.c | 4 -
 drivers/acpi/ec.c | 88 +-
 drivers/acpi/fan.c | 4 -
 drivers/acpi/hed.c | 4 -
 drivers/acpi/int340x_thermal.c | 9 +-
 drivers/acpi/internal.h | 16 +-
 drivers/acpi/nfit.c | 79 +-
 drivers/acpi/nfit.h | 17 +-
 drivers/acpi/numa.c | 4 -
 drivers/acpi/osl.c | 45 +-
 drivers/acpi/pci_irq.c | 22 +-
 drivers/acpi/pci_link.c | 20 +-
 drivers/acpi/pci_root.c | 4 -
 drivers/acpi/pci_slot.c | 4 -
 drivers/acpi/power.c | 19 +-
 drivers/acpi/processor_driver.c | 92 +-
 drivers/acpi/processor_idle.c | 4 -
 drivers/acpi/processor_perflib.c | 10 +-
 drivers/acpi/processor_thermal.c | 4 -
 drivers/acpi/processor_throttling.c | 4 -
 drivers/acpi/property.c | 5 +-
 drivers/acpi/resource.c | 4 -
 drivers/acpi/sbs.c | 4 -
 drivers/acpi/scan.c | 860 --
 drivers/acpi/sysfs.c | 133 +-
 drivers/acpi/tables.c | 4 -
 drivers/acpi/thermal.c | 16 +-
 drivers/acpi/utils.c | 4 -
 drivers/android/binder.c | 2 +-
 drivers/ata/libata-core.c | 1 +
 drivers/ata/pata_arasan_cf.c | 11 +-
 drivers/ata/pata_rb532_cf.c | 3 +-
 drivers/ata/sata_rcar.c | 4 -
 drivers/atm/he.c | 7 +-
 drivers/atm/solos-pci.c | 12 +-
 drivers/auxdisplay/ks0108.c | 98 +-
 drivers/base/Makefile | 1 +
 drivers/base/base.h | 3 +-
 drivers/base/core.c | 95 +
 drivers/base/cpu.c | 2 +-
 drivers/base/dd.c | 28 +
 drivers/base/dma-contiguous.c | 2 +-
 drivers/base/firmware_class.c | 2 +-
 drivers/base/platform-msi.c | 270 +
 drivers/base/power/domain.c | 424 +-
 drivers/base/power/domain_governor.c | 22 +-
 drivers/base/power/main.c | 2 +-
 drivers/base/power/opp.c | 1108 +-
 drivers/base/power/power.h | 2 +
 drivers/base/power/qos.c | 37 +
 drivers/base/power/sysfs.c | 11 +
 drivers/base/property.c | 100 +-
 drivers/base/regmap/internal.h | 12 +-
 drivers/base/regmap/regcache.c | 2 +-
 drivers/base/regmap/regmap-ac97.c | 41 +-
 drivers/base/regmap/regmap-debugfs.c | 99 +-
 drivers/base/regmap/regmap-i2c.c | 90 +-
 drivers/base/regmap/regmap-irq.c | 4 +-
 drivers/base/regmap/regmap-mmio.c | 52 +-
 drivers/base/regmap/regmap-spi.c | 41 +-
 drivers/base/regmap/regmap-spmi.c | 78 +-
 drivers/base/regmap/regmap.c | 368 +-
 drivers/bcma/Kconfig | 2 +-
 drivers/bcma/bcma_private.h | 1 +
 drivers/bcma/driver_gpio.c | 92 +-
 drivers/bcma/main.c | 36 +
 drivers/block/aoe/aoeblk.c | 2 +-
 drivers/block/aoe/aoecmd.c | 10 +-
 drivers/block/aoe/aoedev.c | 2 +-
 drivers/block/brd.c | 23 +-
 drivers/block/drbd/drbd_actlog.c | 4 +-
 drivers/block/drbd/drbd_bitmap.c | 19 +-
 drivers/block/drbd/drbd_int.h | 12 +-
 drivers/block/drbd/drbd_main.c | 1 -
 drivers/block/drbd/drbd_nl.c | 4 +-
 drivers/block/drbd/drbd_req.c | 47 +-
 drivers/block/drbd/drbd_worker.c | 44 +-
 drivers/block/floppy.c | 7 +-
 drivers/block/loop.c | 15 +-
 drivers/block/nbd.c | 388 +-
 drivers/block/null_blk.c | 40 +-
 drivers/block/nvme-core.c | 339 +-
 drivers/block/pktcdvd.c | 59 +-
 drivers/block/ps3vram.c | 5 +-
 drivers/block/rbd.c | 136 +-
 drivers/block/rsxx/dev.c | 11 +-
 drivers/block/skd_main.c | 2 +-
 drivers/block/umem.c | 6 +-
 drivers/block/virtio_blk.c | 8 +-
 drivers/block/xen-blkback/blkback.c | 4 +-
 drivers/block/xen-blkfront.c | 149 +-
 drivers/block/zram/zram_drv.c | 39 +-
 drivers/block/zram/zram_drv.h | 1 -
 drivers/bluetooth/Kconfig | 18 +
 drivers/bluetooth/Makefile | 2 +
 drivers/bluetooth/ath3k.c | 4 +
 drivers/bluetooth/bfusb.c | 2 +-
 drivers/bluetooth/bt3c_cs.c | 2 +-
 drivers/bluetooth/btbcm.c | 8 +-
 drivers/bluetooth/btintel.c | 81 +
 drivers/bluetooth/btintel.h | 19 +
 drivers/bluetooth/btmrvl_drv.h | 6 +-
 drivers/bluetooth/btmrvl_sdio.c | 7 +-
 drivers/bluetooth/btqca.c | 392 +
 drivers/bluetooth/btqca.h | 135 +
 drivers/bluetooth/btusb.c | 103 +-
 drivers/bluetooth/dtl1_cs.c | 6 +-
 drivers/bluetooth/hci_bcm.c | 326 +-
 drivers/bluetooth/hci_h4.c | 9 +-
 drivers/bluetooth/hci_h5.c | 2 +-
 drivers/bluetooth/hci_intel.c | 856 ++
 drivers/bluetooth/hci_ldisc.c | 14 +-
 drivers/bluetooth/hci_qca.c | 969 ++
 drivers/bluetooth/hci_uart.h | 13 +-
 drivers/bus/Kconfig | 1 -
 drivers/bus/arm-ccn.c | 5 +-
 drivers/bus/mips_cdmm.c | 14 +-
 drivers/bus/vexpress-config.c | 2 +-
 drivers/char/agp/intel-gtt.c | 4 +-
 drivers/char/hw_random/xgene-rng.c | 7 +-
 drivers/char/ipmi/ipmi_bt_sm.c | 2 +-
 drivers/char/ipmi/ipmi_kcs_sm.c | 2 +-
 drivers/char/ipmi/ipmi_msghandler.c | 46 +-
 drivers/char/ipmi/ipmi_powernv.c | 10 +-
 drivers/char/ipmi/ipmi_si_intf.c | 597 +-
 drivers/char/ipmi/ipmi_si_sm.h | 10 +-
 drivers/char/ipmi/ipmi_smic_sm.c | 2 +-
 drivers/char/ipmi/ipmi_ssif.c | 14 +-
 drivers/char/misc.c | 17 +-
 drivers/char/nvram.c | 2 +-
 drivers/char/toshiba.c | 2 +-
 drivers/char/xillybus/xillybus_pcie.c | 10 +-
 drivers/clk/Makefile | 2 +-
 drivers/clk/at91/clk-main.c | 7 +-
 drivers/clk/at91/clk-master.c | 7 +-
 drivers/clk/at91/clk-peripheral.c | 6 +-
 drivers/clk/at91/clk-programmable.c | 40 +-
 drivers/clk/at91/clk-slow.c | 16 +-
 drivers/clk/at91/clk-smd.c | 7 +-
 drivers/clk/at91/clk-usb.c | 47 +-
 drivers/clk/at91/pmc.c | 1 -
 drivers/clk/at91/pmc.h | 124 +-
 drivers/clk/bcm/clk-iproc-pll.c | 13 +-
 drivers/clk/bcm/clk-kona.c | 53 +-
 drivers/clk/berlin/berlin2-pll.c | 4 +-
 drivers/clk/clk-axi-clkgen.c | 1 -
 drivers/clk/clk-bcm2835.c | 5 -
 drivers/clk/clk-cdce706.c | 3 +-
 drivers/clk/clk-cdce925.c | 1 +
 drivers/clk/clk-clps711x.c | 1 -
 drivers/clk/clk-composite.c | 61 +-
 drivers/clk/clk-divider.c | 28 +-
 drivers/clk/clk-efm32gg.c | 1 -
 drivers/clk/clk-fixed-factor.c | 5 +-
 drivers/clk/clk-fractional-divider.c | 8 +
 drivers/clk/clk-gate.c | 4 +
 drivers/clk/clk-gpio-gate.c | 207 -
 drivers/clk/clk-gpio.c | 325 +
 drivers/clk/clk-highbank.c | 1 +
 drivers/clk/clk-moxart.c | 1 +
 drivers/clk/clk-mux.c | 7 +-
 drivers/clk/clk-nomadik.c | 3 +-
 drivers/clk/clk-palmas.c | 1 -
 drivers/clk/clk-rk808.c | 1 -
 drivers/clk/clk-s2mps11.c | 32 +-
 drivers/clk/clk-si5351.c | 22 +-
 drivers/clk/clk-si570.c | 1 +
 drivers/clk/clk-stm32f4.c | 5 +-
 drivers/clk/clk-twl6040.c | 13 +-
 drivers/clk/clk-u300.c | 2 +-
 drivers/clk/clk-wm831x.c | 1 -
 drivers/clk/clk-xgene.c | 28 +-
 drivers/clk/clk.c | 349 +-
 drivers/clk/clkdev.c | 3 +-
 drivers/clk/h8300/clk-div.c | 4 +-
 drivers/clk/h8300/clk-h8s2678.c | 21 +-
 drivers/clk/hisilicon/Kconfig | 6 +
 drivers/clk/hisilicon/Makefile | 1 +
 drivers/clk/hisilicon/clk-hi3620.c | 41 +-
 drivers/clk/hisilicon/clk-hi6220-stub.c | 276 +
 drivers/clk/hisilicon/clk-hip04.c | 2 -
 drivers/clk/hisilicon/clk.c | 14 +-
 drivers/clk/hisilicon/clkgate-separated.c | 2 -
 drivers/clk/imx/Makefile | 1 +
 drivers/clk/imx/clk-imx1.c | 1 -
 drivers/clk/imx/clk-imx21.c | 1 -
 drivers/clk/imx/clk-imx31.c | 3 +-
 drivers/clk/imx/clk-imx35.c | 6 +-
 drivers/clk/imx/clk-imx6q.c | 7 +
 drivers/clk/imx/clk-imx6ul.c | 432 +
 drivers/clk/imx/clk-pfd.c | 1 -
 drivers/clk/imx/clk-pllv1.c | 1 -
 drivers/clk/imx/clk-pllv3.c | 1 -
 drivers/clk/ingenic/cgu.c | 1 +
 drivers/clk/keystone/gate.c | 1 -
 drivers/clk/keystone/pll.c | 4 +-
 drivers/clk/mediatek/clk-gate.h | 3 +-
 drivers/clk/mediatek/clk-mt8135.c | 1 +
 drivers/clk/mediatek/clk-mt8173.c | 25 +-
 drivers/clk/mediatek/clk-mtk.h | 9 +-
 drivers/clk/mediatek/clk-pll.c | 39 +-
 drivers/clk/meson/clk-cpu.c | 1 +
 drivers/clk/meson/clkc.c | 1 -
 drivers/clk/mmp/clk-apbc.c | 1 -
 drivers/clk/mmp/clk-apmu.c | 1 -
 drivers/clk/mmp/clk-gate.c | 3 +-
 drivers/clk/mmp/clk-mix.c | 71 +-
 drivers/clk/mmp/clk.c | 3 +-
 drivers/clk/mvebu/clk-cpu.c | 5 +-
 drivers/clk/mvebu/common.c | 2 +-
 drivers/clk/mxs/clk-div.c | 1 -
 drivers/clk/mxs/clk-frac.c | 1 -
 drivers/clk/mxs/clk-imx23.c | 3 +-
 drivers/clk/mxs/clk-imx28.c | 2 +-
 drivers/clk/mxs/clk-pll.c | 1 -
 drivers/clk/mxs/clk-ref.c | 1 -
 drivers/clk/mxs/clk.h | 3 +-
 drivers/clk/nxp/clk-lpc18xx-cgu.c | 1 -
 drivers/clk/pistachio/clk-pll.c | 4 +-
 drivers/clk/pistachio/clk.c | 1 +
 drivers/clk/qcom/clk-branch.c | 2 +-
 drivers/clk/qcom/clk-pll.c | 93 +-
 drivers/clk/qcom/clk-pll.h | 1 +
 drivers/clk/qcom/clk-rcg.c | 63 +-
 drivers/clk/qcom/clk-rcg2.c | 97 +-
 drivers/clk/qcom/common.c | 5 +-
 drivers/clk/qcom/gcc-apq8084.c | 12 +-
 drivers/clk/qcom/gcc-ipq806x.c | 10 +-
 drivers/clk/qcom/gcc-msm8660.c | 8 +-
 drivers/clk/qcom/gcc-msm8916.c | 24 +-
 drivers/clk/qcom/gcc-msm8960.c | 12 +-
 drivers/clk/qcom/gcc-msm8974.c | 4 +-
 drivers/clk/qcom/lcc-ipq806x.c | 6 +-
 drivers/clk/qcom/lcc-msm8960.c | 8 +-
 drivers/clk/qcom/mmcc-apq8084.c | 20 +-
 drivers/clk/qcom/mmcc-msm8960.c | 27 +-
 drivers/clk/qcom/mmcc-msm8974.c | 16 +-
 drivers/clk/rockchip/Makefile | 2 +
 drivers/clk/rockchip/clk-cpu.c | 1 +
 drivers/clk/rockchip/clk-inverter.c | 116 +
 drivers/clk/rockchip/clk-mmc-phase.c | 9 +-
 drivers/clk/rockchip/clk-pll.c | 100 +-
 drivers/clk/rockchip/clk-rk3188.c | 27 +-
 drivers/clk/rockchip/clk-rk3288.c | 14 +-
 drivers/clk/rockchip/clk-rk3368.c | 887 ++
 drivers/clk/rockchip/clk.c | 7 +
 drivers/clk/rockchip/clk.h | 82 +-
 drivers/clk/samsung/clk-cpu.c | 7 +-
 drivers/clk/samsung/clk-exynos-audss.c | 3 +-
 drivers/clk/samsung/clk-exynos-clkout.c | 2 +-
 drivers/clk/samsung/clk-exynos3250.c | 34 +-
 drivers/clk/samsung/clk-exynos4.c | 52 +-
 drivers/clk/samsung/clk-exynos4415.c | 2 -
 drivers/clk/samsung/clk-exynos5250.c | 33 +-
 drivers/clk/samsung/clk-exynos5260.c | 2 -
 drivers/clk/samsung/clk-exynos5410.c | 2 -
 drivers/clk/samsung/clk-exynos5420.c | 3 +-
 drivers/clk/samsung/clk-exynos5433.c | 2 -
 drivers/clk/samsung/clk-exynos5440.c | 2 -
 drivers/clk/samsung/clk-exynos7.c | 2 -
 drivers/clk/samsung/clk-pll.c | 20 +-
 drivers/clk/samsung/clk-s3c2410-dclk.c | 6 +-
 drivers/clk/samsung/clk-s3c2410.c | 2 -
 drivers/clk/samsung/clk-s3c2412.c | 2 -
 drivers/clk/samsung/clk-s3c2443.c | 2 -
 drivers/clk/samsung/clk-s3c64xx.c | 3 +-
 drivers/clk/samsung/clk-s5pv210-audss.c | 2 +-
 drivers/clk/samsung/clk-s5pv210.c | 2 -
 drivers/clk/samsung/clk.c | 4 +
 drivers/clk/samsung/clk.h | 3 +-
 drivers/clk/shmobile/clk-div6.c | 8 +-
 drivers/clk/shmobile/clk-emev2.c | 6 +
 drivers/clk/shmobile/clk-mstp.c | 87 +
 drivers/clk/shmobile/clk-r8a73a4.c | 2 +-
 drivers/clk/shmobile/clk-r8a7740.c | 2 +-
 drivers/clk/shmobile/clk-r8a7778.c | 4 +-
 drivers/clk/shmobile/clk-r8a7779.c | 4 +-
 drivers/clk/shmobile/clk-rcar-gen2.c | 4 +-
 drivers/clk/shmobile/clk-rz.c | 3 +
 drivers/clk/shmobile/clk-sh73a0.c | 2 +-
 drivers/clk/sirf/clk-atlas6.c | 1 -
 drivers/clk/sirf/clk-atlas7.c | 25 +-
 drivers/clk/sirf/clk-common.c | 14 +-
 drivers/clk/sirf/clk-prima2.c | 1 -
 drivers/clk/socfpga/clk-gate-a10.c | 3 +-
 drivers/clk/socfpga/clk-gate.c | 5 +-
 drivers/clk/socfpga/clk-periph-a10.c | 3 +-
 drivers/clk/socfpga/clk-periph.c | 23 +-
 drivers/clk/socfpga/clk-pll-a10.c | 1 +
 drivers/clk/socfpga/clk-pll.c | 3 +-
 drivers/clk/socfpga/clk.h | 3 +-
 drivers/clk/spear/clk-vco-pll.c | 2 +-
 drivers/clk/spear/spear1310_clock.c | 1 -
 drivers/clk/spear/spear1340_clock.c | 1 -
 drivers/clk/spear/spear6xx_clock.c | 1 -
 drivers/clk/st/clk-flexgen.c | 13 +-
 drivers/clk/st/clkgen-fsyn.c | 27 +-
 drivers/clk/st/clkgen-mux.c | 93 +-
 drivers/clk/st/clkgen-pll.c | 21 +-
 drivers/clk/sunxi/Makefile | 1 +
 drivers/clk/sunxi/clk-a20-gmac.c | 4 +-
 drivers/clk/sunxi/clk-factors.c | 39 +-
 drivers/clk/sunxi/clk-mod0.c | 3 +-
 drivers/clk/sunxi/clk-simple-gates.c | 158 +
 drivers/clk/sunxi/clk-sun6i-ar100.c | 36 +-
 drivers/clk/sunxi/clk-sun8i-mbus.c | 2 +-
 drivers/clk/sunxi/clk-sun9i-core.c | 2 +-
 drivers/clk/sunxi/clk-sun9i-mmc.c | 3 +-
 drivers/clk/sunxi/clk-sunxi.c | 227 +-
 drivers/clk/sunxi/clk-usb.c | 3 +-
 drivers/clk/tegra/Makefile | 3 +
 drivers/clk/tegra/clk-dfll.c | 1763 +++
 drivers/clk/tegra/clk-dfll.h | 54 +
 drivers/clk/tegra/clk-divider.c | 1 -
 drivers/clk/tegra/clk-emc.c | 36 +-
 drivers/clk/tegra/clk-periph-gate.c | 1 -
 drivers/clk/tegra/clk-periph.c | 1 -
 drivers/clk/tegra/clk-pll-out.c | 1 -
 drivers/clk/tegra/clk-pll.c | 20 +-
 drivers/clk/tegra/clk-super.c | 1 -
 drivers/clk/tegra/clk-tegra-audio.c | 1 -
 drivers/clk/tegra/clk-tegra-fixed.c | 1 -
 drivers/clk/tegra/clk-tegra-periph.c | 1 -
 drivers/clk/tegra/clk-tegra-pmc.c | 1 -
 drivers/clk/tegra/clk-tegra-super-gen4.c | 5 +-
 drivers/clk/tegra/clk-tegra114.c | 2 -
 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 166 +
 drivers/clk/tegra/clk-tegra124.c | 83 +-
 drivers/clk/tegra/clk-tegra20.c | 1 -
 drivers/clk/tegra/clk-tegra30.c | 1 -
 drivers/clk/tegra/clk.c | 40 +-
 drivers/clk/tegra/clk.h | 3 +
 drivers/clk/tegra/cvb.c | 140 +
 drivers/clk/tegra/cvb.h | 67 +
 drivers/clk/ti/Makefile | 19 +-
 drivers/clk/ti/apll.c | 11 +-
 drivers/clk/ti/autoidle.c | 115 +-
 drivers/clk/ti/clk-2xxx.c | 4 +-
 drivers/clk/ti/clk-33xx.c | 3 +
 drivers/clk/ti/clk-3xxx-legacy.c | 1 +
 drivers/clk/ti/clk-3xxx.c | 235 +
 drivers/clk/ti/clk-43xx.c | 4 +
 drivers/clk/ti/clk-44xx.c | 2 +
 drivers/clk/ti/clk-54xx.c | 2 +
 drivers/clk/ti/clk-7xx.c | 3 +-
 drivers/clk/ti/clk-814x.c | 33 +
 drivers/clk/ti/clk-816x.c | 4 +-
 drivers/clk/ti/clk-dra7-atl.c | 1 +
 drivers/clk/ti/clk.c | 154 +-
 drivers/clk/ti/clkt_dflt.c | 316 +
 drivers/clk/ti/clkt_dpll.c | 370 +
 drivers/clk/ti/clkt_iclk.c | 101 +
 drivers/clk/ti/clock.h | 105 +
 drivers/clk/ti/clockdomain.c | 83 +-
 drivers/clk/ti/composite.c | 4 +-
 drivers/clk/ti/divider.c | 8 +-
 drivers/clk/ti/dpll.c | 9 +-
 drivers/clk/ti/dpll3xxx.c | 817 ++
 drivers/clk/ti/dpll44xx.c | 227 +
 drivers/clk/ti/fapll.c | 4 +-
 drivers/clk/ti/fixed-factor.c | 2 +
 drivers/clk/ti/gate.c | 6 +-
 drivers/clk/ti/interface.c | 2 +-
 drivers/clk/ti/mux.c | 6 +-
 drivers/clk/ux500/Makefile | 1 -
 drivers/clk/ux500/abx500-clk.c | 1 -
 drivers/clk/ux500/clk-prcmu.c | 16 +-
 drivers/clk/ux500/clk-sysctrl.c | 2 +-
 drivers/clk/ux500/clk.h | 3 +-
 drivers/clk/ux500/u8500_clk.c | 526 -
 drivers/clk/ux500/u8500_of_clk.c | 165 +-
 drivers/clk/ux500/u8540_clk.c | 198 +-
 drivers/clk/ux500/u9540_clk.c | 5 +-
 drivers/clk/versatile/clk-icst.c | 9 +-
 drivers/clk/versatile/clk-impd1.c | 1 -
 drivers/clk/versatile/clk-realview.c | 5 +-
 drivers/clk/versatile/clk-sp810.c | 79 +-
 drivers/clk/versatile/clk-versatile.c | 4 +-
 drivers/clk/zte/Makefile | 2 +-
 drivers/clk/zte/clk-pll.c | 172 -
 drivers/clk/zte/clk-zx296702.c | 126 +-
 drivers/clk/zte/clk.c | 309 +
 drivers/clk/zte/clk.h | 9 +
 drivers/clk/zynq/Makefile | 2 +-
 drivers/clk/zynq/clkc.c | 1 +
 drivers/clocksource/Kconfig | 14 +-
 drivers/clocksource/Makefile | 2 +
 drivers/clocksource/arm_arch_timer.c | 52 +-
 drivers/clocksource/arm_global_timer.c | 46 +-
 drivers/clocksource/asm9260_timer.c | 64 +-
 drivers/clocksource/bcm2835_timer.c | 16 -
 drivers/clocksource/bcm_kona_timer.c | 17 +-
 drivers/clocksource/cadence_ttc_timer.c | 60 +-
 drivers/clocksource/clksrc_st_lpc.c | 131 +
 drivers/clocksource/clps711x-timer.c | 6 -
 drivers/clocksource/cs5535-clockevt.c | 24 +-
 drivers/clocksource/dummy_timer.c | 10 -
 drivers/clocksource/dw_apb_timer.c | 146 +-
 drivers/clocksource/em_sti.c | 39 +-
 drivers/clocksource/exynos_mct.c | 101 +-
 drivers/clocksource/fsl_ftm_timer.c | 37 +-
 drivers/clocksource/h8300_timer8.c | 51 +-
 drivers/clocksource/i8253.c | 77 +-
 drivers/clocksource/meson6_timer.c | 50 +-
 drivers/clocksource/metag_generic.c | 20 -
 drivers/clocksource/mips-gic-timer.c | 72 +-
 drivers/clocksource/moxart_timer.c | 49 +-
 drivers/clocksource/mtk_timer.c | 32 +-
 drivers/clocksource/mxs_timer.c | 80 +-
 drivers/clocksource/nomadik-mtu.c | 58 +-
 drivers/clocksource/pxa_timer.c | 39 +-
 drivers/clocksource/qcom-timer.c | 24 +-
 drivers/clocksource/rockchip_timer.c | 34 +-
 drivers/clocksource/samsung_pwm_timer.c | 43 +-
 drivers/clocksource/sh_cmt.c | 65 +-
 drivers/clocksource/sh_mtu2.c | 44 +-
 drivers/clocksource/sh_tmu.c | 64 +-
 drivers/clocksource/sun4i_timer.c | 41 +-
 drivers/clocksource/tcb_clksrc.c | 93 +-
 drivers/clocksource/tegra20_timer.c | 45 +-
 drivers/clocksource/time-armada-370-xp.c | 53 +-
 drivers/clocksource/time-efm32.c | 66 +-
 drivers/clocksource/time-orion.c | 46 +-
 drivers/clocksource/time-pistachio.c | 218 +
 drivers/clocksource/timer-atlas7.c | 19 +-
 drivers/clocksource/timer-atmel-pit.c | 45 +-
 drivers/clocksource/timer-atmel-st.c | 69 +-
 drivers/clocksource/timer-digicolor.c | 43 +-
 drivers/clocksource/timer-imx-gpt.c | 75 +-
 drivers/clocksource/timer-integrator-ap.c | 58 +-
 drivers/clocksource/timer-keystone.c | 46 +-
 drivers/clocksource/timer-prima2.c | 36 +-
 drivers/clocksource/timer-sp804.c | 54 +-
 drivers/clocksource/timer-stm32.c | 30 +-
 drivers/clocksource/timer-sun5i.c | 45 +-
 drivers/clocksource/timer-u300.c | 155 +-
 drivers/clocksource/vf_pit_timer.c | 29 +-
 drivers/clocksource/vt8500_timer.c | 29 +-
 drivers/clocksource/zevio-timer.c | 44 +-
 drivers/cpufreq/Kconfig.arm | 70 +-
 drivers/cpufreq/Makefile | 8 +-
 drivers/cpufreq/acpi-cpufreq.c | 92 +-
 drivers/cpufreq/cpufreq-dt.c | 85 +-
 drivers/cpufreq/cpufreq.c | 447 +-
 drivers/cpufreq/cpufreq_conservative.c | 31 +-
 drivers/cpufreq/cpufreq_governor.c | 196 +-
 drivers/cpufreq/cpufreq_governor.h | 40 +-
 drivers/cpufreq/cpufreq_ondemand.c | 72 +-
 drivers/cpufreq/cpufreq_opp.c | 4 +
 drivers/cpufreq/e_powersaver.c | 2 +-
 drivers/cpufreq/exynos-cpufreq.c | 239 -
 drivers/cpufreq/exynos-cpufreq.h | 89 -
 drivers/cpufreq/exynos4x12-cpufreq.c | 236 -
 drivers/cpufreq/exynos5250-cpufreq.c | 210 -
 drivers/cpufreq/freq_table.c | 15 +
 drivers/cpufreq/ia64-acpi-cpufreq.c | 20 +-
 drivers/cpufreq/integrator-cpufreq.c | 18 +-
 drivers/cpufreq/intel_pstate.c | 74 +-
 drivers/cpufreq/mt8173-cpufreq.c | 527 +
 drivers/cpufreq/powernow-k7.c | 4 +-
 drivers/cpufreq/powernow-k8.c | 5 +-
 drivers/cpufreq/powernv-cpufreq.c | 199 +-
 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 4 +-
 drivers/cpufreq/sfi-cpufreq.c | 4 +-
 drivers/cpufreq/speedstep-lib.c | 9 +-
 drivers/cpufreq/tegra-cpufreq.c | 218 -
 drivers/cpufreq/tegra124-cpufreq.c | 214 +
 drivers/cpufreq/tegra20-cpufreq.c | 218 +
 drivers/cpuidle/coupled.c | 30 +-
 drivers/cpuidle/cpuidle-calxeda.c | 15 +-
 drivers/cpuidle/cpuidle.c | 8 +-
 drivers/cpuidle/cpuidle.h | 13 +-
 drivers/cpuidle/driver.c | 4 +
 drivers/crypto/Kconfig | 19 +-
 drivers/crypto/Makefile | 1 +
 drivers/crypto/amcc/crypto4xx_core.c | 2 +-
 drivers/crypto/bfin_crc.c | 3 +-
 drivers/crypto/caam/Kconfig | 10 +-
 drivers/crypto/caam/caamalg.c | 2877 ++---
 drivers/crypto/caam/caamhash.c | 69 +-
 drivers/crypto/caam/caamrng.c | 26 +-
 drivers/crypto/caam/compat.h | 1 +
 drivers/crypto/caam/ctrl.c | 154 +-
 drivers/crypto/caam/desc.h | 23 +-
 drivers/crypto/caam/desc_constr.h | 2 +-
 drivers/crypto/caam/intern.h | 5 +
 drivers/crypto/caam/jr.c | 30 +-
 drivers/crypto/caam/regs.h | 64 +-
 drivers/crypto/caam/sg_sw_sec4.h | 25 +-
 drivers/crypto/ccp/ccp-platform.c | 2 +
 drivers/crypto/img-hash.c | 2 +-
 drivers/crypto/ixp4xx_crypto.c | 312 +-
 drivers/crypto/marvell/cesa.c | 1 -
 drivers/crypto/nx/Kconfig | 17 +-
 drivers/crypto/nx/Makefile | 8 +-
 drivers/crypto/nx/nx-842-crypto.c | 580 -
 drivers/crypto/nx/nx-842-platform.c | 84 -
 drivers/crypto/nx/nx-842-powernv.c | 42 +-
 drivers/crypto/nx/nx-842-pseries.c | 139 +-
 drivers/crypto/nx/nx-842.c | 554 +-
 drivers/crypto/nx/nx-842.h | 65 +-
 drivers/crypto/nx/nx-aes-ccm.c | 151 +-
 drivers/crypto/nx/nx-aes-ctr.c | 21 -
 drivers/crypto/nx/nx-aes-gcm.c | 64 +-
 drivers/crypto/nx/nx.c | 30 +-
 drivers/crypto/nx/nx.h | 9 +-
 drivers/crypto/omap-aes.c | 86 +-
 drivers/crypto/omap-sham.c | 2 +-
 drivers/crypto/picoxcell_crypto.c | 677 +-
 drivers/crypto/qat/Kconfig | 15 +
 drivers/crypto/qat/Makefile | 1 +
 drivers/crypto/qat/qat_common/.gitignore | 1 +
 drivers/crypto/qat/qat_common/Makefile | 8 +
 drivers/crypto/qat/qat_common/adf_accel_devices.h | 46 +-
 drivers/crypto/qat/qat_common/adf_accel_engine.c | 42 +-
 drivers/crypto/qat/qat_common/adf_admin.c | 290 +
 drivers/crypto/qat/qat_common/adf_aer.c | 8 +-
 drivers/crypto/qat/qat_common/adf_cfg.c | 9 +-
 drivers/crypto/qat/qat_common/adf_cfg_common.h | 3 +-
 drivers/crypto/qat/qat_common/adf_common_drv.h | 53 +-
 drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6 +-
 drivers/crypto/qat/qat_common/adf_dev_mgr.c | 286 +-
 drivers/crypto/qat/qat_common/adf_hw_arbiter.c | 168 +
 drivers/crypto/qat/qat_common/adf_init.c | 104 +-
 drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 438 +
 drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 146 +
 drivers/crypto/qat/qat_common/adf_sriov.c | 309 +
 drivers/crypto/qat/qat_common/adf_transport.c | 13 +-
 .../qat/qat_common/adf_transport_access_macros.h | 5 +-
 .../crypto/qat/qat_common/adf_transport_debug.c | 16 +-
 drivers/crypto/qat/qat_common/icp_qat_fw.h | 2 +
 drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 112 +
 drivers/crypto/qat/qat_common/qat_algs.c | 338 +-
 drivers/crypto/qat/qat_common/qat_asym_algs.c | 652 ++
 drivers/crypto/qat/qat_common/qat_crypto.c | 26 +-
 drivers/crypto/qat/qat_common/qat_crypto.h | 2 -
 drivers/crypto/qat/qat_common/qat_hal.c | 14 +-
 drivers/crypto/qat/qat_common/qat_rsakey.asn1 | 5 +
 drivers/crypto/qat/qat_common/qat_uclo.c | 27 +-
 drivers/crypto/qat/qat_dh895xcc/Makefile | 5 +-
 drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 145 -
 .../crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 38 +-
 .../crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 12 +-
 drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 97 +-
 drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 9 -
 drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c | 159 -
 drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 139 +-
 drivers/crypto/qat/qat_dh895xcc/qat_admin.c | 107 -
 drivers/crypto/qat/qat_dh895xccvf/Makefile | 5 +
 .../qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 172 +
 .../qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 68 +
 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 393 +
 drivers/crypto/qat/qat_dh895xccvf/adf_drv.h | 57 +
 drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | 258 +
 drivers/crypto/qce/sha.c | 2 +-
 drivers/crypto/sahara.c | 48 +-
 drivers/crypto/sunxi-ss/Makefile | 2 +
 drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 542 +
 drivers/crypto/sunxi-ss/sun4i-ss-core.c | 425 +
 drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 492 +
 drivers/crypto/sunxi-ss/sun4i-ss.h | 201 +
 drivers/crypto/talitos.c | 620 +-
 drivers/crypto/talitos.h | 8 +-
 drivers/crypto/vmx/ppc-xlate.pl | 1 +
 drivers/devfreq/devfreq.c | 19 +-
 drivers/devfreq/event/exynos-ppmu.c | 171 +-
 drivers/devfreq/event/exynos-ppmu.h | 70 +
 drivers/devfreq/governor_simpleondemand.c | 33 +-
 drivers/devfreq/tegra-devfreq.c | 8 +-
 drivers/dma/Kconfig | 601 +-
 drivers/dma/Makefile | 87 +-
 drivers/dma/amba-pl08x.c | 192 +-
 drivers/dma/at_hdmac.c | 127 +-
 drivers/dma/at_hdmac_regs.h | 6 +
 drivers/dma/at_xdmac.c | 178 +-
 drivers/dma/coh901318.c | 2 +-
 drivers/dma/dma-axi-dmac.c | 691 ++
 drivers/dma/dma-jz4780.c | 124 +-
 drivers/dma/dmaengine.c | 10 +-
 drivers/dma/dw/Kconfig | 6 +-
 drivers/dma/edma.c | 2 +-
 drivers/dma/hsu/hsu.c | 39 +-
 drivers/dma/hsu/hsu.h | 1 -
 drivers/dma/idma64.c | 710 ++
 drivers/dma/idma64.h | 233 +
 drivers/dma/imx-dma.c | 25 +-
 drivers/dma/imx-sdma.c | 254 +-
 drivers/dma/ioat/Makefile | 2 +-
 drivers/dma/ioat/dca.c | 374 +-
 drivers/dma/ioat/dma.c | 1655 ++-
 drivers/dma/ioat/dma.h | 353 +-
 drivers/dma/ioat/dma_v2.c | 916 --
 drivers/dma/ioat/dma_v2.h | 175 -
 drivers/dma/ioat/dma_v3.c | 1717 ---
 drivers/dma/ioat/hw.h | 16 +-
 drivers/dma/ioat/init.c | 1314 +++
 drivers/dma/ioat/pci.c | 258 -
 drivers/dma/ioat/prep.c | 715 ++
 drivers/dma/ioat/sysfs.c | 135 +
 drivers/dma/iop-adma.c | 9 +-
 drivers/dma/ipu/ipu_irq.c | 64 +-
 drivers/dma/k3dma.c | 3 +-
 drivers/dma/lpc18xx-dmamux.c | 183 +
 drivers/dma/mic_x100_dma.h | 2 +-
 drivers/dma/mmp_pdma.c | 3 +-
 drivers/dma/mmp_tdma.c | 3 +-
 drivers/dma/mv_xor.c | 69 +-
 drivers/dma/pch_dma.c | 4 -
 drivers/dma/pl330.c | 3 +
 drivers/dma/pxa_dma.c | 45 +-
 drivers/dma/sh/Kconfig | 24 +-
 drivers/dma/sh/Makefile | 4 +-
 drivers/dma/sirf-dma.c | 3 +
 drivers/dma/ste_dma40.c | 2 +-
 drivers/dma/sun4i-dma.c | 1288 +++
 drivers/dma/sun6i-dma.c | 2 +-
 drivers/dma/tegra20-apb-dma.c | 63 +-
 drivers/dma/ti-dma-crossbar.c | 41 +-
 drivers/dma/timb_dma.c | 4 -
 drivers/dma/xgene-dma.c | 112 +-
 drivers/dma/zx296702_dma.c | 951 ++
 drivers/edac/Kconfig | 10 -
 drivers/edac/Makefile | 1 -
 drivers/edac/mce_amd.c | 3 +-
 drivers/edac/mce_amd_inj.c | 375 -
 drivers/edac/sb_edac.c | 72 +-
 drivers/edac/xgene_edac.c | 1 -
 drivers/extcon/extcon-arizona.c | 101 +-
 drivers/extcon/extcon-gpio.c | 18 -
 drivers/extcon/extcon-max77693.c | 94 +-
 drivers/extcon/extcon-max77843.c | 75 +-
 drivers/extcon/extcon-palmas.c | 134 +-
 drivers/extcon/extcon-rt8973a.c | 1 -
 drivers/extcon/extcon-sm5502.c | 1 -
 drivers/extcon/extcon-usb-gpio.c | 1 +
 drivers/extcon/extcon.c | 40 +-
 drivers/firmware/Kconfig | 11 +
 drivers/firmware/Makefile | 4 +-
 drivers/firmware/efi/Kconfig | 2 +-
 drivers/firmware/efi/libstub/efistub.h | 4 -
 drivers/firmware/psci.c | 382 +
 drivers/firmware/qcom_scm-32.c | 4 +-
 drivers/firmware/qcom_scm-64.c | 63 +
 drivers/gpio/Kconfig | 12 +-
 drivers/gpio/Makefile | 2 +
 drivers/gpio/devres.c | 18 +-
 drivers/gpio/gpio-74xx-mmio.c | 2 +-
 drivers/gpio/gpio-adp5588.c | 10 +-
 drivers/gpio/gpio-altera.c | 10 +-
 drivers/gpio/gpio-ath79.c | 204 +
 drivers/gpio/gpio-bcm-kona.c | 19 +-
 drivers/gpio/gpio-brcmstb.c | 306 +-
 drivers/gpio/gpio-davinci.c | 17 +-
 drivers/gpio/gpio-dwapb.c | 4 +-
 drivers/gpio/gpio-em.c | 35 +-
 drivers/gpio/gpio-ep93xx.c | 9 +-
 drivers/gpio/gpio-etraxfs.c | 329 +-
 drivers/gpio/gpio-generic.c | 41 +-
 drivers/gpio/gpio-grgpio.c | 23 +-
 drivers/gpio/gpio-intel-mid.c | 2 +-
 drivers/gpio/gpio-lynxpoint.c | 2 +-
 drivers/gpio/gpio-max732x.c | 11 +-
 drivers/gpio/gpio-mcp23s08.c | 4 -
 drivers/gpio/gpio-mpc8xxx.c | 123 +-
 drivers/gpio/gpio-msic.c | 2 +-
 drivers/gpio/gpio-msm-v2.c | 25 +-
 drivers/gpio/gpio-mvebu.c | 8 +-
 drivers/gpio/gpio-mxc.c | 23 +-
 drivers/gpio/gpio-mxs.c | 17 +-
 drivers/gpio/gpio-omap.c | 167 +-
 drivers/gpio/gpio-pcf857x.c | 14 +-
 drivers/gpio/gpio-pch.c | 4 +-
 drivers/gpio/gpio-pl061.c | 2 +-
 drivers/gpio/gpio-pxa.c | 10 +-
 drivers/gpio/gpio-rcar.c | 24 +-
 drivers/gpio/gpio-sa1100.c | 7 +-
 drivers/gpio/gpio-sta2x11.c | 2 +-
 drivers/gpio/gpio-sx150x.c | 1 -
 drivers/gpio/gpio-tc3589x.c | 10 +-
 drivers/gpio/gpio-tegra.c | 11 +-
 drivers/gpio/gpio-timberdale.c | 14 +-
 drivers/gpio/gpio-tz1090.c | 8 +-
 drivers/gpio/gpio-vf610.c | 13 +-
 drivers/gpio/gpio-xlp.c | 2 +-
 drivers/gpio/gpio-zx.c | 324 +
 drivers/gpio/gpio-zynq.c | 14 +-
 drivers/gpio/gpiolib-acpi.c | 2 +-
 drivers/gpio/gpiolib-of.c | 34 +-
 drivers/gpio/gpiolib.c | 118 +-
 drivers/gpu/drm/Kconfig | 28 +-
 drivers/gpu/drm/Makefile | 3 +-
 drivers/gpu/drm/amd/amdgpu/Makefile | 22 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 221 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 269 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 65 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 670 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 543 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 14 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 800 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 391 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 213 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 45 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 77 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 168 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_family.h | 62 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 65 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 587 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 40 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 11 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 35 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 45 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 19 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 32 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 79 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 113 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 216 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 59 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 78 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 152 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 186 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 338 +-
 drivers/gpu/drm/amd/amdgpu/atom-bits.h | 48 -
 drivers/gpu/drm/amd/amdgpu/atom-names.h | 100 -
 drivers/gpu/drm/amd/amdgpu/atom-types.h | 42 -
 drivers/gpu/drm/amd/amdgpu/atombios.h | 8555 ---------------
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/cik.c | 23 +-
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 95 +-
 drivers/gpu/drm/amd/amdgpu/cikd.h | 7 +
 drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/cz_smc.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 27 +-
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 181 +
 drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h | 182 +
 drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 857 ++
 drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h | 42 +
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 120 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 227 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 34 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 57 +-
 drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h | 5 +
 drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 3 +
 drivers/gpu/drm/amd/amdgpu/pptable.h | 698 --
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 98 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 125 +-
 drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h | 5 +
 drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 8 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 27 +-
 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 58 +-
 drivers/gpu/drm/amd/amdgpu/vi.c | 120 +-
 drivers/gpu/drm/amd/amdgpu/vi_dpm.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/vid.h | 5 +
 drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +-
 drivers/gpu/drm/amd/amdkfd/Makefile | 3 +-
 drivers/gpu/drm/amd/amdkfd/cik_regs.h | 11 -
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +-
 .../drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 12 +-
 .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 103 +-
 drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 20 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 249 +-
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 99 +-
 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h | 398 +
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5 +
 drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 +
 drivers/gpu/drm/amd/include/amd_shared.h | 39 +
 .../gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 1246 +++
 .../drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h | 1282 +++
 .../amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h | 6080 +++++++++++
 drivers/gpu/drm/amd/include/atom-bits.h | 48 +
 drivers/gpu/drm/amd/include/atom-names.h | 100 +
 drivers/gpu/drm/amd/include/atom-types.h | 42 +
 drivers/gpu/drm/amd/include/atombios.h | 8555 +++++++++++++++
 drivers/gpu/drm/amd/include/cgs_common.h | 624 ++
 drivers/gpu/drm/amd/include/cgs_linux.h | 118 +
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3 +-
 drivers/gpu/drm/amd/include/pptable.h | 702 ++
 drivers/gpu/drm/amd/include/vi_structs.h | 417 +
 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 41 +
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 425 +
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 133 +
 drivers/gpu/drm/amd/scheduler/sched_fence.c | 81 +
 drivers/gpu/drm/armada/armada_fbdev.c | 33 +-
 drivers/gpu/drm/ast/ast_fb.c | 48 +-
 drivers/gpu/drm/ast/ast_main.c | 16 +-
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 6 +-
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 216 +-
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 4 +
 drivers/gpu/drm/bochs/bochs_drv.c | 4 +-
 drivers/gpu/drm/bochs/bochs_fbdev.c | 36 +-
 drivers/gpu/drm/bochs/bochs_mm.c | 16 +-
 drivers/gpu/drm/bridge/Kconfig | 24 +-
 drivers/gpu/drm/bridge/Makefile | 4 +-
 drivers/gpu/drm/bridge/dw_hdmi.c | 387 +-
 drivers/gpu/drm/bridge/dw_hdmi.h | 8 +-
 drivers/gpu/drm/bridge/nxp-ptn3460.c | 411 +
 drivers/gpu/drm/bridge/parade-ps8622.c | 679 ++
 drivers/gpu/drm/bridge/ps8622.c | 679 --
 drivers/gpu/drm/bridge/ptn3460.c | 411 -
 drivers/gpu/drm/cirrus/cirrus_drv.c | 4 +-
 drivers/gpu/drm/cirrus/cirrus_fbdev.c | 41 +-
 drivers/gpu/drm/cirrus/cirrus_main.c | 15 +-
 drivers/gpu/drm/drm_atomic.c | 97 +-
 drivers/gpu/drm/drm_atomic_helper.c | 129 +-
 drivers/gpu/drm/drm_context.c | 51 +-
 drivers/gpu/drm/drm_crtc.c | 233 +-
 drivers/gpu/drm/drm_crtc_helper.c | 75 +-
 drivers/gpu/drm/drm_dp_helper.c | 99 +-
 drivers/gpu/drm/drm_dp_mst_topology.c | 61 +-
 drivers/gpu/drm/drm_drv.c | 19 +-
 drivers/gpu/drm/drm_edid.c | 4 +-
 drivers/gpu/drm/drm_fb_cma_helper.c | 63 +-
 drivers/gpu/drm/drm_fb_helper.c | 385 +-
 drivers/gpu/drm/drm_gem.c | 13 +-
 drivers/gpu/drm/drm_gem_cma_helper.c | 10 +-
 drivers/gpu/drm/drm_ioc32.c | 55 +-
 drivers/gpu/drm/drm_ioctl.c | 6 +-
 drivers/gpu/drm/drm_irq.c | 332 +-
 drivers/gpu/drm/drm_legacy.h | 2 +-
 drivers/gpu/drm/drm_modeset_lock.c | 59 +-
 drivers/gpu/drm/drm_of.c | 2 +-
 drivers/gpu/drm/drm_plane_helper.c | 23 +-
 drivers/gpu/drm/drm_probe_helper.c | 58 +-
 drivers/gpu/drm/exynos/Kconfig | 3 +-
 drivers/gpu/drm/exynos/Makefile | 7 +-
 drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 113 +-
 drivers/gpu/drm/exynos/exynos7_drm_decon.c | 159 +-
 drivers/gpu/drm/exynos/exynos_dp_core.c | 142 +-
 drivers/gpu/drm/exynos/exynos_dp_core.h | 3 +-
 drivers/gpu/drm/exynos/exynos_drm_buf.c | 186 -
 drivers/gpu/drm/exynos/exynos_drm_buf.h | 33 -
 drivers/gpu/drm/exynos/exynos_drm_core.c | 42 -
 drivers/gpu/drm/exynos/exynos_drm_crtc.c | 101 +-
 drivers/gpu/drm/exynos/exynos_drm_crtc.h | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 286 -
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.h | 20 -
 drivers/gpu/drm/exynos/exynos_drm_dpi.c | 111 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 226 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.h | 138 +-
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 138 +-
 drivers/gpu/drm/exynos/exynos_drm_encoder.c | 174 -
 drivers/gpu/drm/exynos/exynos_drm_encoder.h | 23 -
 drivers/gpu/drm/exynos/exynos_drm_fb.c | 164 +-
 drivers/gpu/drm/exynos/exynos_drm_fb.h | 16 +-
 drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 129 +-
 drivers/gpu/drm/exynos/exynos_drm_fimc.c | 36 +-
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 196 +-
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 157 +-
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 481 +-
 drivers/gpu/drm/exynos/exynos_drm_gem.h | 64 +-
 drivers/gpu/drm/exynos/exynos_drm_gsc.c | 16 +
 drivers/gpu/drm/exynos/exynos_drm_iommu.c | 20 +-
 drivers/gpu/drm/exynos/exynos_drm_iommu.h | 15 -
 drivers/gpu/drm/exynos/exynos_drm_ipp.c | 16 +-
 drivers/gpu/drm/exynos/exynos_drm_plane.c | 56 +-
 drivers/gpu/drm/exynos/exynos_drm_plane.h | 1 +
 drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2 +-
 drivers/gpu/drm/exynos/exynos_drm_vidi.c | 124 +-
 drivers/gpu/drm/exynos/exynos_hdmi.c | 1014 +-
 drivers/gpu/drm/exynos/exynos_mixer.c | 223 +-
 drivers/gpu/drm/fsl-dcu/Kconfig | 18 +
 drivers/gpu/drm/fsl-dcu/Makefile | 7 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 210 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h | 19 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 404 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 197 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c | 23 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c | 43 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h | 33 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 262 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h | 17 +
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 182 +
 drivers/gpu/drm/gma500/accel_2d.c | 6 +-
 drivers/gpu/drm/gma500/framebuffer.c | 48 +-
 drivers/gpu/drm/i2c/adv7511.c | 2 +-
 drivers/gpu/drm/i915/Kconfig | 24 -
 drivers/gpu/drm/i915/Makefile | 21 +-
 drivers/gpu/drm/i915/dvo_ivch.c | 63 +-
 drivers/gpu/drm/i915/i915_cmd_parser.c | 10 +-
 drivers/gpu/drm/i915/i915_debugfs.c | 342 +-
 drivers/gpu/drm/i915/i915_dma.c | 42 +-
 drivers/gpu/drm/i915/i915_drv.c | 76 +-
 drivers/gpu/drm/i915/i915_drv.h | 301 +-
 drivers/gpu/drm/i915/i915_gem.c | 863 +-
 drivers/gpu/drm/i915/i915_gem_context.c | 94 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 160 +-
 drivers/gpu/drm/i915/i915_gem_fence.c | 787 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c | 732 +-
 drivers/gpu/drm/i915/i915_gem_gtt.h | 64 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c | 70 +-
 drivers/gpu/drm/i915/i915_gem_render_state.h | 4 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c | 306 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c | 303 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c | 5 +-
 drivers/gpu/drm/i915/i915_gpu_error.c | 5 +
 drivers/gpu/drm/i915/i915_guc_reg.h | 102 +
 drivers/gpu/drm/i915/i915_ioc32.c | 138 +-
 drivers/gpu/drm/i915/i915_irq.c | 573 +-
 drivers/gpu/drm/i915/i915_params.c | 24 +-
 drivers/gpu/drm/i915/i915_reg.h | 181 +-
 drivers/gpu/drm/i915/i915_suspend.c | 2 +-
 drivers/gpu/drm/i915/i915_sysfs.c | 22 +-
 drivers/gpu/drm/i915/i915_trace.h | 16 +-
 drivers/gpu/drm/i915/intel_atomic.c | 220 +-
 drivers/gpu/drm/i915/intel_atomic_plane.c | 41 +-
 drivers/gpu/drm/i915/intel_audio.c | 32 +-
 drivers/gpu/drm/i915/intel_bios.c | 232 +-
 drivers/gpu/drm/i915/intel_bios.h | 29 +-
 drivers/gpu/drm/i915/intel_crt.c | 49 +-
 drivers/gpu/drm/i915/intel_csr.c | 22 +-
 drivers/gpu/drm/i915/intel_ddi.c | 1106 +-
 drivers/gpu/drm/i915/intel_display.c | 4410 ++++----
 drivers/gpu/drm/i915/intel_dp.c | 442 +-
 drivers/gpu/drm/i915/intel_dp_mst.c | 50 +-
 drivers/gpu/drm/i915/intel_drv.h | 162 +-
 drivers/gpu/drm/i915/intel_dsi.c | 51 +-
 drivers/gpu/drm/i915/intel_dsi.h | 3 +
 drivers/gpu/drm/i915/intel_dsi_pll.c | 97 +-
 drivers/gpu/drm/i915/intel_dvo.c | 46 +-
 drivers/gpu/drm/i915/intel_fbc.c | 540 +-
 drivers/gpu/drm/i915/intel_fbdev.c | 110 +-
 drivers/gpu/drm/i915/intel_frontbuffer.c | 117 +-
 drivers/gpu/drm/i915/intel_guc_fwif.h | 245 +
 drivers/gpu/drm/i915/intel_hdmi.c | 446 +-
 drivers/gpu/drm/i915/intel_hotplug.c | 508 +
 drivers/gpu/drm/i915/intel_lrc.c | 931 +-
 drivers/gpu/drm/i915/intel_lrc.h | 23 +-
 drivers/gpu/drm/i915/intel_lvds.c | 74 +-
 drivers/gpu/drm/i915/intel_mocs.c | 335 +
 drivers/gpu/drm/i915/intel_mocs.h | 57 +
 drivers/gpu/drm/i915/intel_opregion.c | 104 +-
 drivers/gpu/drm/i915/intel_overlay.c | 63 +-
 drivers/gpu/drm/i915/intel_panel.c | 94 +-
 drivers/gpu/drm/i915/intel_pm.c | 839 +-
 drivers/gpu/drm/i915/intel_psr.c | 81 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c | 410 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h | 92 +-
 drivers/gpu/drm/i915/intel_runtime_pm.c | 118 +-
 drivers/gpu/drm/i915/intel_sdvo.c | 47 +-
 drivers/gpu/drm/i915/intel_sprite.c | 205 +-
 drivers/gpu/drm/i915/intel_tv.c | 2 +-
 drivers/gpu/drm/i915/intel_uncore.c | 74 +-
 drivers/gpu/drm/imx/dw_hdmi-imx.c | 5 +
 drivers/gpu/drm/mgag200/mgag200_cursor.c | 22 +-
 drivers/gpu/drm/mgag200/mgag200_drv.c | 1 +
 drivers/gpu/drm/mgag200/mgag200_drv.h | 1 +
 drivers/gpu/drm/mgag200/mgag200_fb.c | 66 +-
 drivers/gpu/drm/mgag200/mgag200_i2c.c | 1 +
 drivers/gpu/drm/mgag200/mgag200_main.c | 61 +-
 drivers/gpu/drm/mgag200/mgag200_mode.c | 221 +-
 drivers/gpu/drm/mgag200/mgag200_ttm.c | 8 +-
 drivers/gpu/drm/msm/Kconfig | 15 +
 drivers/gpu/drm/msm/Makefile | 15 +-
 drivers/gpu/drm/msm/adreno/a2xx.xml.h | 18 +-
 drivers/gpu/drm/msm/adreno/a3xx.xml.h | 33 +-
 drivers/gpu/drm/msm/adreno/a4xx.xml.h | 206 +-
 drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 18 +-
 drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 18 +-
 drivers/gpu/drm/msm/dsi/dsi.c | 58 +-
 drivers/gpu/drm/msm/dsi/dsi.h | 43 +-
 drivers/gpu/drm/msm/dsi/dsi.xml.h | 211 +-
 drivers/gpu/drm/msm/dsi/dsi_cfg.c | 92 +
 drivers/gpu/drm/msm/dsi/dsi_cfg.h | 44 +
 drivers/gpu/drm/msm/dsi/dsi_host.c | 270 +-
 drivers/gpu/drm/msm/dsi/dsi_manager.c | 216 +-
 drivers/gpu/drm/msm/dsi/dsi_phy.c | 607 --
 drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 26 +-
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 452 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 89 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 150 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 166 +
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 42 +-
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9 +
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 33 +-
 drivers/gpu/drm/msm/dsi/sfpb.xml.h | 26 +-
 drivers/gpu/drm/msm/edp/edp.xml.h | 22 +-
 drivers/gpu/drm/msm/edp/edp_ctrl.c | 17 +-
 drivers/gpu/drm/msm/hdmi/hdmi.c | 79 +-
 drivers/gpu/drm/msm/hdmi/hdmi.h | 32 +-
 drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 28 +-
 drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 1 -
 drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 16 +-
 drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 101 +-
 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 1437 +++
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 52 -
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 32 -
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 57 -
 drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 26 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 22 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 8 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 19 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 38 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 24 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 8 +-
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 9 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 180 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 180 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 13 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 12 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 139 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 243 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 43 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 18 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 20 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 80 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 57 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 334 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 26 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 3 +-
 drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 28 +-
 drivers/gpu/drm/msm/mdp/mdp_format.c | 46 +-
 drivers/gpu/drm/msm/mdp/mdp_kms.c | 3 +-
 drivers/gpu/drm/msm/mdp/mdp_kms.h | 20 +-
 drivers/gpu/drm/msm/msm_drv.c | 82 +-
 drivers/gpu/drm/msm/msm_drv.h | 19 +
 drivers/gpu/drm/msm/msm_fbdev.c | 34 +-
 drivers/gpu/drm/nouveau/Kbuild | 1 -
 drivers/gpu/drm/nouveau/dispnv04/arb.c | 2 +-
 drivers/gpu/drm/nouveau/dispnv04/dac.c | 45 +-
 drivers/gpu/drm/nouveau/dispnv04/dfp.c | 23 +-
 drivers/gpu/drm/nouveau/dispnv04/disp.c | 8 +-
 drivers/gpu/drm/nouveau/dispnv04/disp.h | 2 +-
 drivers/gpu/drm/nouveau/dispnv04/hw.c | 29 +-
 drivers/gpu/drm/nouveau/dispnv04/hw.h | 26 +-
 drivers/gpu/drm/nouveau/dispnv04/overlay.c | 15 +-
 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 16 +-
 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 30 +-
 drivers/gpu/drm/nouveau/dispnv04/tvnv17.h | 4 +-
 drivers/gpu/drm/nouveau/include/nvif/class.h | 199 +-
 drivers/gpu/drm/nouveau/include/nvif/client.h | 27 +-
 drivers/gpu/drm/nouveau/include/nvif/device.h | 73 +-
 drivers/gpu/drm/nouveau/include/nvif/ioctl.h | 34 +-
 drivers/gpu/drm/nouveau/include/nvif/notify.h | 12 +-
 drivers/gpu/drm/nouveau/include/nvif/object.h | 70 +-
 drivers/gpu/drm/nouveau/include/nvif/os.h | 7 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/client.h | 65 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/debug.h | 9 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 274 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h | 62 -
 drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h | 51 -
 drivers/gpu/drm/nouveau/include/nvkm/core/engine.h | 81 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/enum.h | 3 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h | 62 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/handle.h | 34 -
 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h | 53 +
 drivers/gpu/drm/nouveau/include/nvkm/core/mm.h | 3 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h | 53 -
 drivers/gpu/drm/nouveau/include/nvkm/core/object.h | 261 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h | 22 +
 drivers/gpu/drm/nouveau/include/nvkm/core/option.h | 1 +
 drivers/gpu/drm/nouveau/include/nvkm/core/parent.h | 58 -
 drivers/gpu/drm/nouveau/include/nvkm/core/pci.h | 14 +
 drivers/gpu/drm/nouveau/include/nvkm/core/printk.h | 29 -
 drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h | 28 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | 139 +-
 drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 35 +
 drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h | 4 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 17 +-
 .../gpu/drm/nouveau/include/nvkm/engine/cipher.h | 2 +-
 .../gpu/drm/nouveau/include/nvkm/engine/device.h | 30 -
 drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 39 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h | 32 +
 .../gpu/drm/nouveau/include/nvkm/engine/dmaobj.h | 26 -
 .../gpu/drm/nouveau/include/nvkm/engine/falcon.h | 75 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 160 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 118 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h | 63 +-
 .../gpu/drm/nouveau/include/nvkm/engine/mspdec.h | 9 +-
 .../gpu/drm/nouveau/include/nvkm/engine/msppp.h | 7 +-
 .../gpu/drm/nouveau/include/nvkm/engine/msvld.h | 10 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h | 35 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h | 4 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h | 50 +-
 drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h | 4 +-
 .../gpu/drm/nouveau/include/nvkm/engine/xtensa.h | 38 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h | 29 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h | 15 +-
 .../gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h | 10 +-
 .../drm/nouveau/include/nvkm/subdev/bios/init.h | 1 +
 .../drm/nouveau/include/nvkm/subdev/bios/ramcfg.h | 24 +-
 .../drm/nouveau/include/nvkm/subdev/bios/rammap.h | 4 +
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h | 44 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 70 +-
 .../gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 43 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 139 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h | 26 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h | 31 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h | 151 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h | 30 +-
 .../gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 54 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h | 37 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 31 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 78 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h | 30 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | 34 +
 drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h | 31 +-
 .../gpu/drm/nouveau/include/nvkm/subdev/therm.h | 106 +-
 .../gpu/drm/nouveau/include/nvkm/subdev/timer.h | 83 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h | 30 +-
 drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 48 +-
 drivers/gpu/drm/nouveau/nouveau_abi16.c | 221 +-
 drivers/gpu/drm/nouveau/nouveau_abi16.h | 4 +-
 drivers/gpu/drm/nouveau/nouveau_acpi.c | 4 +-
 drivers/gpu/drm/nouveau/nouveau_acpi.h | 4 +-
 drivers/gpu/drm/nouveau/nouveau_agp.c | 195 -
 drivers/gpu/drm/nouveau/nouveau_agp.h | 10 -
 drivers/gpu/drm/nouveau/nouveau_backlight.c | 22 +-
 drivers/gpu/drm/nouveau/nouveau_bios.c | 44 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c | 84 +-
 drivers/gpu/drm/nouveau/nouveau_chan.c | 123 +-
 drivers/gpu/drm/nouveau/nouveau_chan.h | 2 +-
 drivers/gpu/drm/nouveau/nouveau_connector.c | 40 +-
 drivers/gpu/drm/nouveau/nouveau_display.c | 30 +-
 drivers/gpu/drm/nouveau/nouveau_dma.c | 10 +-
 drivers/gpu/drm/nouveau/nouveau_dma.h | 2 +-
 drivers/gpu/drm/nouveau/nouveau_dp.c | 17 +-
 drivers/gpu/drm/nouveau/nouveau_drm.c | 148 +-
 drivers/gpu/drm/nouveau/nouveau_drm.h | 33 +-
 drivers/gpu/drm/nouveau/nouveau_encoder.h | 4 +-
 drivers/gpu/drm/nouveau/nouveau_fbcon.c | 39 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c | 15 +-
 drivers/gpu/drm/nouveau/nouveau_fence.h | 2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c | 63 +-
 drivers/gpu/drm/nouveau/nouveau_hwmon.c | 10 +-
 drivers/gpu/drm/nouveau/nouveau_nvif.c | 8 +-
 drivers/gpu/drm/nouveau/nouveau_platform.c | 227 +-
 drivers/gpu/drm/nouveau/nouveau_platform.h | 47 +-
 drivers/gpu/drm/nouveau/nouveau_sysfs.c | 8 +-
 drivers/gpu/drm/nouveau/nouveau_ttm.c | 66 +-
 drivers/gpu/drm/nouveau/nouveau_vga.c | 9 +-
 drivers/gpu/drm/nouveau/nv04_fbcon.c | 12 +-
 drivers/gpu/drm/nouveau/nv04_fence.c | 6 +-
 drivers/gpu/drm/nouveau/nv10_fence.c | 2 +-
 drivers/gpu/drm/nouveau/nv17_fence.c | 4 +-
 drivers/gpu/drm/nouveau/nv50_display.c | 197 +-
 drivers/gpu/drm/nouveau/nv50_fbcon.c | 2 +-
 drivers/gpu/drm/nouveau/nv50_fence.c | 4 +-
 drivers/gpu/drm/nouveau/nv84_fence.c | 6 +-
 drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2 +-
 drivers/gpu/drm/nouveau/nvif/client.c | 68 +-
 drivers/gpu/drm/nouveau/nvif/device.c | 55 +-
 drivers/gpu/drm/nouveau/nvif/notify.c | 49 +-
 drivers/gpu/drm/nouveau/nvif/object.c | 200 +-
 drivers/gpu/drm/nouveau/nvkm/core/Kbuild | 7 +-
 drivers/gpu/drm/nouveau/nvkm/core/client.c | 188 +-
 drivers/gpu/drm/nouveau/nvkm/core/engctx.c | 239 -
 drivers/gpu/drm/nouveau/nvkm/core/engine.c | 154 +-
 drivers/gpu/drm/nouveau/nvkm/core/enum.c | 28 +-
 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | 379 +-
 drivers/gpu/drm/nouveau/nvkm/core/handle.c | 221 -
 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 395 +-
 drivers/gpu/drm/nouveau/nvkm/core/memory.c | 64 +
 drivers/gpu/drm/nouveau/nvkm/core/mm.c | 2 +-
 drivers/gpu/drm/nouveau/nvkm/core/namedb.c | 199 -
 drivers/gpu/drm/nouveau/nvkm/core/object.c | 400 +-
 drivers/gpu/drm/nouveau/nvkm/core/oproxy.c | 200 +
 drivers/gpu/drm/nouveau/nvkm/core/option.c | 20 +-
 drivers/gpu/drm/nouveau/nvkm/core/parent.c | 159 -
 drivers/gpu/drm/nouveau/nvkm/core/printk.c | 103 -
 drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 144 +-
 drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 208 +-
 drivers/gpu/drm/nouveau/nvkm/engine/Kbuild | 2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | 79 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc | 8 +-
 .../drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h | 4 +-
 .../drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h | 4 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c | 180 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c | 174 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c | 167 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c | 144 +-
 drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h | 7 +
 drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c | 189 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild | 12 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c | 8 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h | 4 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 2923 ++++-
 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 82 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h | 12 +
 drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c | 358 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c | 326 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | 190 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c | 89 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c | 204 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c | 131 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c | 153 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c | 427 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c | 478 -
 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 1686 +++
 drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 54 +-
 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 295 +
 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 371 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 86 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 325 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c | 80 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basegf119.c | 114 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basegk104.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basegk110.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basegt200.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basegt215.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/basenv50.c | 123 +
 .../gpu/drm/nouveau/nvkm/engine/disp/changf119.c | 49 +
 .../gpu/drm/nouveau/nvkm/engine/disp/channv50.c | 301 +
 .../gpu/drm/nouveau/nvkm/engine/disp/channv50.h | 127 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c | 118 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h | 61 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c | 117 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c | 63 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregf119.c | 244 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregk104.c | 132 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregk110.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregm107.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregm204.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregt200.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/coregt215.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/corenv50.c | 242 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c | 68 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c | 63 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | 100 +
 .../gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | 247 +
 .../gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h | 91 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c | 86 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c | 275 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c | 139 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c | 1310 ---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 536 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c | 265 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c | 100 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 100 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c | 109 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c | 147 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c | 105 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c | 73 -
 .../gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c | 83 +
 .../gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 30 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c | 55 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c | 79 -
 .../gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c | 80 +
 .../gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c | 41 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c | 55 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c | 186 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 1667 +--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 231 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c | 37 +
 .../gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c | 68 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 127 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 82 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c | 202 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 63 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c | 77 +
 .../gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c | 101 +
 .../gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c | 103 +
 .../gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c | 80 +
 .../gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c | 38 +
 .../gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c | 111 +
 .../gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | 81 +
 .../gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c | 83 +
 .../gpu/drm/nouveau/nvkm/engine/disp/piornv50.c | 165 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h | 78 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c | 58 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c | 171 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c | 58 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c | 139 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c | 399 +
 .../gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 43 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 95 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c | 124 -
 .../gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 117 +
 .../gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c | 74 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c | 37 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c | 138 +-
 drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild | 11 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c | 157 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c | 36 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c | 36 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c | 36 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c | 36 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h | 18 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c | 144 +
 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h | 18 +
 .../gpu/drm/nouveau/nvkm/engine/dma/usergf100.c | 149 +
.../gpu/drm/nouveau/nvkm/engine/dma/usergf119.c | 131 + drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c | 133 + drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c | 156 + drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild | 5 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c | 164 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c | 176 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c | 165 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c | 163 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c | 195 - drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h | 28 - drivers/gpu/drm/nouveau/nvkm/engine/falcon.c | 292 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 20 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 345 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 415 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | 33 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c | 285 + .../gpu/drm/nouveau/nvkm/engine/fifo/changf100.h | 24 + .../gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | 29 + .../gpu/drm/nouveau/nvkm/engine/fifo/channv04.h | 24 + .../gpu/drm/nouveau/nvkm/engine/fifo/channv50.c | 270 + .../gpu/drm/nouveau/nvkm/engine/fifo/channv50.h | 35 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c | 93 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | 220 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c | 96 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c | 97 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c | 243 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c | 91 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c | 481 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 924 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h | 31 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 1037 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 89 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c | 30 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c | 30 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c | 45 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c | 44 + .../gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c | 94 + .../gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c | 293 + .../gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 323 + .../gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c | 34 + .../gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c | 92 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 638 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h | 170 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c | 153 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c | 208 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c | 335 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | 533 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h | 39 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 26 + .../gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h | 132 + drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 48 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | 136 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 327 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 80 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c | 52 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 88 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 143 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 15 +- 
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c | 80 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 135 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c | 119 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c | 15 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c | 103 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c | 13 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h | 9 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c | 25 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c | 196 + drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 1556 +-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 128 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c | 32 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 45 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c | 47 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c | 34 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c | 34 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 227 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c | 43 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 32 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c | 43 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c | 349 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 215 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c | 223 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c | 32 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c | 83 + drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c | 47 + drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c | 48 + drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c | 46 + drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c | 48 + drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c | 1213 ++- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c | 824 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h | 13 + drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c | 59 + drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c | 59 + drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c | 567 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h | 37 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c | 220 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c | 180 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | 331 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | 218 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c | 218 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c | 590 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h | 37 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c | 108 + drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | 877 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h | 32 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | 38 + drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c | 84 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 406 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h | 27 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c | 107 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 248 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c | 228 +- drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h | 16 + drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c | 32 + drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c | 100 +- drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c | 100 +- drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c | 98 +- drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c | 43 + drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h | 11 + drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild | 2 + 
drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c | 31 + drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c | 100 +- drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c | 100 +- drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c | 43 + drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h | 9 + drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild | 3 + drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c | 31 + drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c | 101 +- drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c | 100 +- drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c | 98 +- drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c | 43 + drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c | 43 + drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h | 11 + drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild | 5 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 911 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c | 108 - drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c | 126 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c | 214 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h | 16 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c | 66 + drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c | 80 + drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c | 154 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c | 57 - drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c | 157 + drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c | 113 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c | 97 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h | 18 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c | 152 +- drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h | 87 +- .../gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s | 6 +- .../drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h | 4 +- drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c | 138 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild | 5 + drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c | 110 + drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c | 111 + drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h | 26 + drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c | 188 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c | 151 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c | 106 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c | 224 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h | 35 +- drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c | 85 + drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h | 21 + drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h | 21 + drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c | 79 +- drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c | 192 +- drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c | 133 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c | 56 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 205 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h | 23 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c | 40 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 287 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h | 26 + drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c | 25 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c | 20 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c | 14 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 147 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c | 14 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c | 28 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c | 30 +- 
drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c | 72 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | 36 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 81 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c | 16 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 18 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c | 30 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | 57 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c | 7 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 596 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c | 11 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c | 31 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c | 92 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c | 174 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c | 34 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c | 14 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c | 187 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 133 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c | 8 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | 18 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c | 18 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c | 36 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c | 38 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c | 98 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c | 40 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c | 52 +- drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c | 64 + drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c | 71 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c | 32 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h | 6 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c | 78 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h | 21 - drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c | 81 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c | 93 +- drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h | 18 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 176 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c | 41 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 318 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 326 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 356 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 344 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h | 6 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c | 282 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c | 56 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c | 173 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c | 294 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h | 24 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c | 6 +- drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h | 26 + drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c | 128 +- .../gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h | 5 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c | 44 +- .../gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | 82 +- .../gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | 38 +- 
.../gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c | 125 +- .../gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c | 77 +- .../gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c | 44 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c | 242 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h | 18 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c | 71 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c | 44 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c | 24 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c | 44 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c | 151 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h | 15 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 197 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c | 23 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c | 18 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 121 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h | 25 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 55 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c | 23 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c | 23 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c | 23 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c | 60 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h | 53 - drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c | 41 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c | 53 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c | 32 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c | 77 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c | 47 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h | 14 - drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c | 54 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c | 57 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c | 29 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c | 27 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | 351 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h | 24 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 107 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c | 100 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 50 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h | 25 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 342 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | 263 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c | 37 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c | 304 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c | 104 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c | 54 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c | 39 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c | 38 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c | 47 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c | 176 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h | 14 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c | 51 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c | 50 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c | 51 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c | 35 +- 
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c | 507 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c | 6 +- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c | 37 +- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c | 57 +- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c | 40 +- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c | 53 +- drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h | 9 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 147 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c | 41 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c | 84 - drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c | 86 + drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c | 47 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c | 41 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h | 37 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 30 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c | 374 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 151 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 30 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 181 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c | 181 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c | 742 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c | 149 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c | 245 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h | 37 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c | 95 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c | 96 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c | 86 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c | 113 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c | 241 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c | 106 - drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c | 40 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c | 39 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c | 199 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c | 104 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c | 96 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c | 109 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h | 32 - drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c | 119 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h | 107 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c | 87 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c | 51 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c | 87 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c | 18 +- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c | 36 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c | 36 + drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h | 13 - drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h | 67 +- drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 99 +- drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 124 +- drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 93 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 301 +- .../gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 394 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 240 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h | 36 - drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c | 247 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 266 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 60 +- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c | 124 +- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c | 202 
+- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c | 43 +- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 146 +- drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h | 76 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 4 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 178 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c | 37 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c | 58 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c | 97 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c | 38 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c | 26 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c | 85 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h | 20 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c | 44 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c | 36 - drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c | 66 +- drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 46 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 234 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c | 138 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c | 128 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h | 15 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c | 136 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c | 195 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c | 174 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 39 + drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c | 80 +- drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c | 28 +- drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c | 47 +- drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h | 15 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c | 175 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h | 18 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 182 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c | 44 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c | 58 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c | 65 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c | 37 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c | 51 + drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h | 19 + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild | 3 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 230 +- .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 | 70 - .../drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h | 1795 --- .../gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 | 70 + .../drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h | 1795 +++ drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c | 19 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c | 40 - drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c | 39 + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | 102 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c | 59 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c | 19 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 149 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c | 41 + drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c | 31 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c | 69 +- drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 30 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 305 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c | 117 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c | 3 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c | 67 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | 80 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c | 190 +- 
drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c | 174 - drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c | 153 + drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c | 66 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c | 85 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c | 51 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c | 129 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c | 106 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h | 86 +- drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 122 +- drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 158 +- drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c | 43 +- drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | 253 +- drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h | 25 - drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c | 88 + drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c | 85 + drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h | 22 + .../gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c | 128 +- drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c | 123 +- drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c | 15 +- drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c | 33 +- drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h | 20 + drivers/gpu/drm/omapdrm/omap_crtc.c | 6 +- drivers/gpu/drm/omapdrm/omap_fbdev.c | 38 +- drivers/gpu/drm/panel/Kconfig | 16 +- drivers/gpu/drm/panel/Makefile | 5 +- drivers/gpu/drm/panel/panel-ld9040.c | 389 - drivers/gpu/drm/panel/panel-lg-lg4573.c | 298 + drivers/gpu/drm/panel/panel-s6e8aa0.c | 1067 -- drivers/gpu/drm/panel/panel-samsung-ld9040.c | 389 + drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c | 1067 ++ drivers/gpu/drm/panel/panel-simple.c | 99 +- drivers/gpu/drm/qxl/qxl_display.c | 10 +- drivers/gpu/drm/qxl/qxl_fb.c | 40 +- drivers/gpu/drm/qxl/qxl_object.c | 4 +- drivers/gpu/drm/qxl/qxl_release.c | 4 +- drivers/gpu/drm/radeon/atombios_encoders.c | 19 +- drivers/gpu/drm/radeon/dce6_afmt.c | 29 +- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_device.c | 4 + drivers/gpu/drm/radeon/radeon_dp_mst.c | 25 +- drivers/gpu/drm/radeon/radeon_encoders.c | 1 - drivers/gpu/drm/radeon/radeon_fb.c | 90 +- drivers/gpu/drm/radeon/radeon_kfd.c | 3 +- drivers/gpu/drm/radeon/radeon_kms.c | 5 +- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1 + drivers/gpu/drm/radeon/radeon_mode.h | 1 + drivers/gpu/drm/radeon/radeon_object.c | 4 +- drivers/gpu/drm/radeon/radeon_pm.c | 48 +- drivers/gpu/drm/radeon/si_dpm.c | 2 + drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 6 +- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 47 +- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 12 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 269 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 88 + drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2 +- drivers/gpu/drm/sti/Makefile | 7 +- drivers/gpu/drm/sti/sti_compositor.c | 141 +- drivers/gpu/drm/sti/sti_compositor.h | 12 +- drivers/gpu/drm/sti/sti_crtc.c | 381 + drivers/gpu/drm/sti/sti_crtc.h | 22 + drivers/gpu/drm/sti/sti_cursor.c | 243 +- drivers/gpu/drm/sti/sti_cursor.h | 5 +- drivers/gpu/drm/sti/sti_drm_crtc.c | 322 - drivers/gpu/drm/sti/sti_drm_crtc.h | 22 - drivers/gpu/drm/sti/sti_drm_drv.c | 327 - drivers/gpu/drm/sti/sti_drm_drv.h | 35 - drivers/gpu/drm/sti/sti_drm_plane.c | 251 - drivers/gpu/drm/sti/sti_drm_plane.h | 18 - drivers/gpu/drm/sti/sti_drv.c | 294 + drivers/gpu/drm/sti/sti_drv.h | 35 + drivers/gpu/drm/sti/sti_gdp.c | 536 +- drivers/gpu/drm/sti/sti_gdp.h | 7 +- 
drivers/gpu/drm/sti/sti_hdmi.c | 27 +- drivers/gpu/drm/sti/sti_hqvdp.c | 482 +- drivers/gpu/drm/sti/sti_hqvdp.h | 12 - drivers/gpu/drm/sti/sti_layer.c | 213 - drivers/gpu/drm/sti/sti_layer.h | 131 - drivers/gpu/drm/sti/sti_mixer.c | 72 +- drivers/gpu/drm/sti/sti_mixer.h | 27 +- drivers/gpu/drm/sti/sti_plane.c | 122 + drivers/gpu/drm/sti/sti_plane.h | 71 + drivers/gpu/drm/sti/sti_tvout.c | 54 +- drivers/gpu/drm/sti/sti_vid.c | 72 +- drivers/gpu/drm/sti/sti_vid.h | 19 +- drivers/gpu/drm/tegra/dc.c | 300 +- drivers/gpu/drm/tegra/dc.h | 24 +- drivers/gpu/drm/tegra/dpaux.c | 63 +- drivers/gpu/drm/tegra/dpaux.h | 2 + drivers/gpu/drm/tegra/drm.c | 16 +- drivers/gpu/drm/tegra/drm.h | 10 + drivers/gpu/drm/tegra/dsi.c | 126 +- drivers/gpu/drm/tegra/dsi.h | 4 + drivers/gpu/drm/tegra/fb.c | 35 +- drivers/gpu/drm/tegra/hdmi.c | 78 +- drivers/gpu/drm/tegra/output.c | 20 +- drivers/gpu/drm/tegra/rgb.c | 49 +- drivers/gpu/drm/tegra/sor.c | 1664 ++- drivers/gpu/drm/tegra/sor.h | 298 +- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 22 +- drivers/gpu/drm/ttm/ttm_bo.c | 28 +- drivers/gpu/drm/ttm/ttm_tt.c | 4 +- drivers/gpu/drm/udl/udl_fb.c | 41 +- drivers/gpu/drm/vgem/vgem_drv.c | 2 +- drivers/gpu/drm/via/via_dmablit.c | 2 +- drivers/gpu/drm/virtio/virtgpu_debugfs.c | 4 +- drivers/gpu/drm/virtio/virtgpu_fb.c | 32 +- drivers/gpu/drm/virtio/virtgpu_fence.c | 2 +- drivers/gpu/drm/vmwgfx/Kconfig | 2 +- drivers/gpu/drm/vmwgfx/Makefile | 3 +- .../gpu/drm/vmwgfx/device_include/includeCheck.h | 3 + .../gpu/drm/vmwgfx/device_include/svga3d_caps.h | 110 + drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h | 2071 ++++ .../gpu/drm/vmwgfx/device_include/svga3d_devcaps.h | 457 + drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h | 1487 +++ .../gpu/drm/vmwgfx/device_include/svga3d_limits.h | 99 + drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h | 50 + .../drm/vmwgfx/device_include/svga3d_surfacedefs.h | 1204 +++ .../gpu/drm/vmwgfx/device_include/svga3d_types.h | 1633 +++ .../gpu/drm/vmwgfx/device_include/svga_escape.h | 89 + .../gpu/drm/vmwgfx/device_include/svga_overlay.h | 199 + drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 1936 ++++ drivers/gpu/drm/vmwgfx/device_include/svga_types.h | 46 + .../gpu/drm/vmwgfx/device_include/vm_basic_types.h | 21 + .../drm/vmwgfx/device_include/vmware_pack_begin.h | 25 + .../drm/vmwgfx/device_include/vmware_pack_end.h | 25 + drivers/gpu/drm/vmwgfx/svga3d_reg.h | 2627 ----- drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 912 -- drivers/gpu/drm/vmwgfx/svga_escape.h | 89 - drivers/gpu/drm/vmwgfx/svga_overlay.h | 201 - drivers/gpu/drm/vmwgfx/svga_reg.h | 1564 --- drivers/gpu/drm/vmwgfx/svga_types.h | 45 - drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1294 +++ drivers/gpu/drm/vmwgfx/vmwgfx_binding.h | 209 + drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 24 +- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 1317 +++ drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 26 +- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 786 +- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 661 ++ drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 184 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 518 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 344 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2059 +++- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 575 +- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 145 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 18 +- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 47 +- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1699 ++- 
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 194 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 49 +- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 212 +- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 18 +- drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 12 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 306 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 14 +- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 556 +- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 502 +- drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 555 + drivers/gpu/drm/vmwgfx/vmwgfx_so.h | 160 + drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 1266 +++ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 327 +- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 2 +- drivers/gpu/host1x/mipi.c | 253 +- drivers/gpu/ipu-v3/ipu-common.c | 11 +- drivers/gpu/vga/vga_switcheroo.c | 95 +- drivers/gpu/vga/vgaarb.c | 142 +- drivers/hid/Kconfig | 7 + drivers/hid/Makefile | 1 + drivers/hid/hid-chicony.c | 26 + drivers/hid/hid-core.c | 31 +- drivers/hid/hid-cp2112.c | 97 +- drivers/hid/hid-gembird.c | 116 + drivers/hid/hid-ids.h | 20 +- drivers/hid/hid-input.c | 5 +- drivers/hid/hid-lenovo.c | 59 +- drivers/hid/hid-lg.c | 2 + drivers/hid/hid-microsoft.c | 6 +- drivers/hid/hid-multitouch.c | 8 + drivers/hid/hid-picolcd_backlight.c | 3 +- drivers/hid/hid-picolcd_cir.c | 3 +- drivers/hid/hid-picolcd_lcd.c | 3 +- drivers/hid/hid-rmi.c | 163 +- drivers/hid/hid-sensor-hub.c | 3 + drivers/hid/hid-sony.c | 22 +- drivers/hid/i2c-hid/i2c-hid.c | 28 +- drivers/hid/usbhid/hid-core.c | 3 +- drivers/hid/usbhid/hid-quirks.c | 6 +- drivers/hid/wacom.h | 7 +- drivers/hid/wacom_sys.c | 285 +- drivers/hid/wacom_wac.c | 504 +- drivers/hid/wacom_wac.h | 15 +- drivers/hsi/clients/cmt_speech.c | 2 +- drivers/hv/channel.c | 4 +- drivers/hv/channel_mgmt.c | 51 +- drivers/hv/hv.c | 152 +- drivers/hv/hv_balloon.c | 26 +- drivers/hv/hv_fcopy.c | 21 +- drivers/hv/hv_kvp.c | 3 + drivers/hv/hyperv_vmbus.h | 16 +- drivers/hv/ring_buffer.c | 14 +- drivers/hv/vmbus_drv.c | 353 +- drivers/hwmon/Kconfig | 8 +- drivers/hwmon/abx500.c | 1 + drivers/hwmon/f71882fg.c | 176 +- drivers/hwmon/fam15h_power.c | 36 +- drivers/hwmon/g762.c | 1 - drivers/hwmon/gpio-fan.c | 1 + drivers/hwmon/it87.c | 43 +- drivers/hwmon/lm70.c | 34 +- drivers/hwmon/lm75.c | 2 +- drivers/hwmon/nct6775.c | 48 +- drivers/hwmon/nct7802.c | 317 +- drivers/hwmon/ntc_thermistor.c | 2 +- drivers/hwmon/pmbus/Kconfig | 20 +- drivers/hwmon/pmbus/Makefile | 1 + drivers/hwmon/pmbus/adm1275.c | 358 +- drivers/hwmon/pmbus/lm25066.c | 7 +- drivers/hwmon/pmbus/ltc2978.c | 482 +- drivers/hwmon/pmbus/max20751.c | 64 + drivers/hwmon/pmbus/max34440.c | 9 +- drivers/hwmon/pmbus/max8688.c | 19 +- drivers/hwmon/pmbus/pmbus.c | 5 + drivers/hwmon/pmbus/pmbus.h | 448 +- drivers/hwmon/pmbus/pmbus_core.c | 31 +- drivers/hwmon/pmbus/zl6100.c | 11 +- drivers/hwmon/pwm-fan.c | 1 + drivers/hwmon/sht15.c | 20 +- drivers/hwmon/tmp102.c | 2 +- drivers/hwtracing/coresight/coresight-etm.h | 7 +- drivers/hwtracing/coresight/coresight-etm3x.c | 33 +- drivers/hwtracing/coresight/coresight-etm4x.c | 37 +- drivers/hwtracing/coresight/coresight-etm4x.h | 7 +- drivers/hwtracing/coresight/coresight-replicator.c | 13 +- drivers/i2c/busses/Kconfig | 19 +- drivers/i2c/busses/Makefile | 2 + drivers/i2c/busses/i2c-cadence.c | 69 +- drivers/i2c/busses/i2c-designware-core.c | 3 +- drivers/i2c/busses/i2c-designware-pcidrv.c | 4 +- drivers/i2c/busses/i2c-emev2.c | 332 + drivers/i2c/busses/i2c-i801.c | 120 +- drivers/i2c/busses/i2c-lpc2k.c | 513 + drivers/i2c/busses/i2c-mt65xx.c | 21 +- drivers/i2c/busses/i2c-mv64xxx.c | 2 - 
drivers/i2c/busses/i2c-omap.c | 600 +- drivers/i2c/busses/i2c-parport.c | 25 +- drivers/i2c/busses/i2c-parport.h | 8 + drivers/i2c/busses/i2c-pnx.c | 10 +- drivers/i2c/busses/i2c-pxa.c | 112 +- drivers/i2c/busses/i2c-tegra.c | 52 +- drivers/i2c/busses/i2c-viperboard.c | 10 +- drivers/i2c/busses/i2c-xiic.c | 74 +- drivers/i2c/i2c-core.c | 239 +- drivers/i2c/i2c-slave-eeprom.c | 1 - drivers/i2c/muxes/Kconfig | 11 + drivers/i2c/muxes/Makefile | 1 + drivers/i2c/muxes/i2c-arb-gpio-challenge.c | 3 +- drivers/i2c/muxes/i2c-mux-gpio.c | 1 + drivers/i2c/muxes/i2c-mux-pca9541.c | 1 - drivers/i2c/muxes/i2c-mux-pca954x.c | 1 - drivers/i2c/muxes/i2c-mux-pinctrl.c | 1 + drivers/i2c/muxes/i2c-mux-reg.c | 290 + drivers/idle/intel_idle.c | 80 +- drivers/iio/accel/Kconfig | 26 +- drivers/iio/accel/bma180.c | 1 - drivers/iio/accel/bmc150-accel.c | 243 +- drivers/iio/accel/kxcjk-1013.c | 5 +- drivers/iio/accel/mma8452.c | 215 +- drivers/iio/accel/mma9551_core.c | 35 +- drivers/iio/accel/mma9551_core.h | 6 +- drivers/iio/accel/mma9553.c | 83 +- drivers/iio/accel/st_accel.h | 1 + drivers/iio/accel/st_accel_core.c | 12 +- drivers/iio/accel/st_accel_i2c.c | 6 +- drivers/iio/accel/st_accel_spi.c | 1 + drivers/iio/accel/stk8312.c | 429 +- drivers/iio/accel/stk8ba50.c | 369 +- drivers/iio/adc/Kconfig | 52 +- drivers/iio/adc/berlin2-adc.c | 22 +- drivers/iio/adc/cc10001_adc.c | 26 +- drivers/iio/adc/mcp320x.c | 16 +- drivers/iio/adc/mcp3422.c | 1 - drivers/iio/adc/ti-adc081c.c | 1 - drivers/iio/adc/twl4030-madc.c | 34 + drivers/iio/adc/vf610_adc.c | 79 +- drivers/iio/common/ssp_sensors/ssp_dev.c | 1 - drivers/iio/common/st_sensors/st_sensors_core.c | 52 +- drivers/iio/dac/ad5064.c | 1 - drivers/iio/dac/ad5380.c | 1 - drivers/iio/dac/ad5446.c | 1 - drivers/iio/dac/max5821.c | 1 - drivers/iio/frequency/adf4350.c | 1 - drivers/iio/gyro/adis16136.c | 6 + drivers/iio/gyro/adis16260.c | 137 +- drivers/iio/gyro/itg3200_core.c | 1 - drivers/iio/gyro/st_gyro_core.c | 3 + drivers/iio/gyro/st_gyro_i2c.c | 1 - drivers/iio/humidity/dht11.c | 65 +- drivers/iio/humidity/si7005.c | 1 - drivers/iio/imu/adis16400_core.c | 44 +- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 7 +- drivers/iio/imu/kmx61.c | 8 +- drivers/iio/industrialio-buffer.c | 33 +- drivers/iio/industrialio-core.c | 33 +- drivers/iio/industrialio-event.c | 6 + drivers/iio/industrialio-trigger.c | 27 +- drivers/iio/industrialio-triggered-buffer.c | 12 +- drivers/iio/light/Kconfig | 34 +- drivers/iio/light/Makefile | 3 + drivers/iio/light/acpi-als.c | 18 +- drivers/iio/light/apds9300.c | 1 - drivers/iio/light/bh1750.c | 1 - drivers/iio/light/cm32181.c | 2 +- drivers/iio/light/cm3232.c | 2 +- drivers/iio/light/cm3323.c | 19 +- drivers/iio/light/cm36651.c | 2 +- drivers/iio/light/gp2ap020a00f.c | 2 +- drivers/iio/light/hid-sensor-prox.c | 3 +- drivers/iio/light/isl29125.c | 13 +- drivers/iio/light/jsa1212.c | 1 - drivers/iio/light/ltr501.c | 1 - drivers/iio/light/opt3001.c | 804 ++ drivers/iio/light/pa12203001.c | 483 + drivers/iio/light/rpr0521.c | 615 ++ drivers/iio/light/stk3310.c | 7 +- drivers/iio/light/tcs3414.c | 1 - drivers/iio/light/tcs3472.c | 1 - drivers/iio/light/tsl4531.c | 1 - drivers/iio/light/vcnl4000.c | 1 - drivers/iio/magnetometer/bmc150_magn.c | 101 +- drivers/iio/magnetometer/mmc35240.c | 33 +- drivers/iio/magnetometer/st_magn.h | 3 + drivers/iio/magnetometer/st_magn_buffer.c | 7 + drivers/iio/magnetometer/st_magn_core.c | 98 +- drivers/iio/magnetometer/st_magn_i2c.c | 6 +- drivers/iio/magnetometer/st_magn_spi.c | 1 + drivers/iio/pressure/Kconfig | 6 
+- drivers/iio/pressure/ms5611.h | 16 +- drivers/iio/pressure/ms5611_core.c | 82 +- drivers/iio/pressure/ms5611_i2c.c | 6 +- drivers/iio/pressure/ms5611_spi.c | 6 +- drivers/iio/pressure/st_pressure_core.c | 3 + drivers/iio/pressure/st_pressure_i2c.c | 1 - drivers/iio/temperature/mlx90614.c | 21 +- drivers/iio/temperature/tmp006.c | 5 +- drivers/infiniband/Kconfig | 3 - drivers/infiniband/core/Makefile | 3 +- drivers/infiniband/core/cache.c | 773 +- drivers/infiniband/core/cm.c | 225 +- drivers/infiniband/core/cma.c | 683 +- drivers/infiniband/core/core_priv.h | 54 +- drivers/infiniband/core/device.c | 335 +- drivers/infiniband/core/mad.c | 28 +- drivers/infiniband/core/mad_priv.h | 1 - drivers/infiniband/core/multicast.c | 7 +- drivers/infiniband/core/netlink.c | 55 + drivers/infiniband/core/roce_gid_mgmt.c | 747 ++ drivers/infiniband/core/sa_query.c | 515 +- drivers/infiniband/core/sysfs.c | 51 +- drivers/infiniband/core/ucm.c | 9 +- drivers/infiniband/core/ucma.c | 151 +- drivers/infiniband/core/user_mad.c | 6 +- drivers/infiniband/core/uverbs.h | 13 +- drivers/infiniband/core/uverbs_cmd.c | 137 +- drivers/infiniband/core/uverbs_main.c | 405 +- drivers/infiniband/core/verbs.c | 198 +- drivers/infiniband/hw/Makefile | 3 - drivers/infiniband/hw/amso1100/Kbuild | 6 - drivers/infiniband/hw/amso1100/Kconfig | 15 - drivers/infiniband/hw/amso1100/c2.c | 1241 --- drivers/infiniband/hw/amso1100/c2.h | 547 - drivers/infiniband/hw/amso1100/c2_ae.c | 327 - drivers/infiniband/hw/amso1100/c2_ae.h | 108 - drivers/infiniband/hw/amso1100/c2_alloc.c | 142 - drivers/infiniband/hw/amso1100/c2_cm.c | 461 - drivers/infiniband/hw/amso1100/c2_cq.c | 440 - drivers/infiniband/hw/amso1100/c2_intr.c | 219 - drivers/infiniband/hw/amso1100/c2_mm.c | 377 - drivers/infiniband/hw/amso1100/c2_mq.c | 174 - drivers/infiniband/hw/amso1100/c2_mq.h | 106 - drivers/infiniband/hw/amso1100/c2_pd.c | 90 - drivers/infiniband/hw/amso1100/c2_provider.c | 912 -- drivers/infiniband/hw/amso1100/c2_provider.h | 182 - drivers/infiniband/hw/amso1100/c2_qp.c | 1024 -- drivers/infiniband/hw/amso1100/c2_rnic.c | 655 -- drivers/infiniband/hw/amso1100/c2_status.h | 158 - drivers/infiniband/hw/amso1100/c2_user.h | 82 - drivers/infiniband/hw/amso1100/c2_vq.c | 260 - drivers/infiniband/hw/amso1100/c2_vq.h | 63 - drivers/infiniband/hw/amso1100/c2_wr.h | 1520 --- drivers/infiniband/hw/cxgb3/iwch_provider.c | 14 +- drivers/infiniband/hw/cxgb4/cm.c | 82 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4 +- drivers/infiniband/hw/cxgb4/mem.c | 12 +- drivers/infiniband/hw/cxgb4/provider.c | 2 +- drivers/infiniband/hw/ehca/Kconfig | 9 - drivers/infiniband/hw/ehca/Makefile | 16 - drivers/infiniband/hw/ehca/ehca_av.c | 277 - drivers/infiniband/hw/ehca/ehca_classes.h | 482 - drivers/infiniband/hw/ehca/ehca_classes_pSeries.h | 208 - drivers/infiniband/hw/ehca/ehca_cq.c | 397 - drivers/infiniband/hw/ehca/ehca_eq.c | 189 - drivers/infiniband/hw/ehca/ehca_hca.c | 414 - drivers/infiniband/hw/ehca/ehca_irq.c | 870 -- drivers/infiniband/hw/ehca/ehca_irq.h | 77 - drivers/infiniband/hw/ehca/ehca_iverbs.h | 218 - drivers/infiniband/hw/ehca/ehca_main.c | 1123 -- drivers/infiniband/hw/ehca/ehca_mcast.c | 131 - drivers/infiniband/hw/ehca/ehca_mrmw.c | 2593 ----- drivers/infiniband/hw/ehca/ehca_mrmw.h | 132 - drivers/infiniband/hw/ehca/ehca_pd.c | 124 - drivers/infiniband/hw/ehca/ehca_qes.h | 260 - drivers/infiniband/hw/ehca/ehca_qp.c | 2257 ---- drivers/infiniband/hw/ehca/ehca_reqs.c | 953 -- drivers/infiniband/hw/ehca/ehca_sqp.c | 245 - 
drivers/infiniband/hw/ehca/ehca_tools.h | 155 - drivers/infiniband/hw/ehca/ehca_uverbs.c | 309 - drivers/infiniband/hw/ehca/hcp_if.c | 949 -- drivers/infiniband/hw/ehca/hcp_if.h | 265 - drivers/infiniband/hw/ehca/hcp_phyp.c | 82 - drivers/infiniband/hw/ehca/hcp_phyp.h | 90 - drivers/infiniband/hw/ehca/hipz_fns.h | 68 - drivers/infiniband/hw/ehca/hipz_fns_core.h | 100 - drivers/infiniband/hw/ehca/hipz_hw.h | 414 - drivers/infiniband/hw/ehca/ipz_pt_fn.c | 289 - drivers/infiniband/hw/ehca/ipz_pt_fn.h | 289 - drivers/infiniband/hw/ipath/Kconfig | 14 - drivers/infiniband/hw/ipath/Makefile | 37 - drivers/infiniband/hw/ipath/ipath_common.h | 851 -- drivers/infiniband/hw/ipath/ipath_cq.c | 483 - drivers/infiniband/hw/ipath/ipath_debug.h | 99 - drivers/infiniband/hw/ipath/ipath_diag.c | 551 - drivers/infiniband/hw/ipath/ipath_dma.c | 179 - drivers/infiniband/hw/ipath/ipath_driver.c | 2789 ----- drivers/infiniband/hw/ipath/ipath_eeprom.c | 1183 -- drivers/infiniband/hw/ipath/ipath_file_ops.c | 2620 ----- drivers/infiniband/hw/ipath/ipath_fs.c | 422 - drivers/infiniband/hw/ipath/ipath_iba6110.c | 1940 ---- drivers/infiniband/hw/ipath/ipath_init_chip.c | 1066 -- drivers/infiniband/hw/ipath/ipath_intr.c | 1273 --- drivers/infiniband/hw/ipath/ipath_kernel.h | 1373 --- drivers/infiniband/hw/ipath/ipath_keys.c | 270 - drivers/infiniband/hw/ipath/ipath_mad.c | 1521 --- drivers/infiniband/hw/ipath/ipath_mmap.c | 174 - drivers/infiniband/hw/ipath/ipath_mr.c | 425 - drivers/infiniband/hw/ipath/ipath_qp.c | 1080 -- drivers/infiniband/hw/ipath/ipath_rc.c | 1969 ---- drivers/infiniband/hw/ipath/ipath_registers.h | 512 - drivers/infiniband/hw/ipath/ipath_ruc.c | 734 -- drivers/infiniband/hw/ipath/ipath_sdma.c | 818 -- drivers/infiniband/hw/ipath/ipath_srq.c | 380 - drivers/infiniband/hw/ipath/ipath_stats.c | 347 - drivers/infiniband/hw/ipath/ipath_sysfs.c | 1238 --- drivers/infiniband/hw/ipath/ipath_uc.c | 547 - drivers/infiniband/hw/ipath/ipath_ud.c | 580 - drivers/infiniband/hw/ipath/ipath_user_pages.c | 229 - drivers/infiniband/hw/ipath/ipath_user_sdma.c | 875 -- drivers/infiniband/hw/ipath/ipath_user_sdma.h | 52 - drivers/infiniband/hw/ipath/ipath_verbs.c | 2364 ---- drivers/infiniband/hw/ipath/ipath_verbs.h | 939 -- drivers/infiniband/hw/ipath/ipath_verbs_mcast.c | 364 - drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | 49 - drivers/infiniband/hw/ipath/ipath_wc_x86_64.c | 144 - drivers/infiniband/hw/mlx4/ah.c | 2 +- drivers/infiniband/hw/mlx4/cq.c | 2 +- drivers/infiniband/hw/mlx4/mad.c | 23 +- drivers/infiniband/hw/mlx4/main.c | 891 +- drivers/infiniband/hw/mlx4/mcg.c | 8 +- drivers/infiniband/hw/mlx4/mlx4_ib.h | 40 +- drivers/infiniband/hw/mlx4/mr.c | 11 +- drivers/infiniband/hw/mlx4/qp.c | 10 +- drivers/infiniband/hw/mlx5/cq.c | 10 +- drivers/infiniband/hw/mlx5/main.c | 79 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 16 +- drivers/infiniband/hw/mlx5/mr.c | 112 +- drivers/infiniband/hw/mlx5/qp.c | 9 +- drivers/infiniband/hw/mthca/mthca_provider.c | 1 + drivers/infiniband/hw/nes/nes_verbs.c | 19 +- drivers/infiniband/hw/ocrdma/ocrdma.h | 1 - drivers/infiniband/hw/ocrdma/ocrdma_main.c | 236 +- drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 2 + drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 56 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 15 +- drivers/infiniband/hw/qib/qib_file_ops.c | 2 +- drivers/infiniband/hw/qib/qib_mad.h | 147 +- drivers/infiniband/hw/qib/qib_mmap.c | 2 +- drivers/infiniband/hw/qib/qib_mr.c | 9 +- drivers/infiniband/hw/qib/qib_ruc.c | 1 + drivers/infiniband/hw/qib/qib_verbs.c | 3 +- 
drivers/infiniband/hw/qib/qib_verbs.h | 4 +- drivers/infiniband/hw/usnic/usnic.h | 21 +- drivers/infiniband/hw/usnic/usnic_abi.h | 21 +- drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h | 21 +- drivers/infiniband/hw/usnic/usnic_common_util.h | 21 +- drivers/infiniband/hw/usnic/usnic_debugfs.c | 21 +- drivers/infiniband/hw/usnic/usnic_debugfs.h | 21 +- drivers/infiniband/hw/usnic/usnic_fwd.c | 21 +- drivers/infiniband/hw/usnic/usnic_fwd.h | 21 +- drivers/infiniband/hw/usnic/usnic_ib.h | 21 +- drivers/infiniband/hw/usnic/usnic_ib_main.c | 21 +- drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | 21 +- drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h | 21 +- drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 21 +- drivers/infiniband/hw/usnic/usnic_ib_sysfs.h | 21 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 21 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 21 +- drivers/infiniband/hw/usnic/usnic_log.h | 21 +- drivers/infiniband/hw/usnic/usnic_transport.c | 21 +- drivers/infiniband/hw/usnic/usnic_transport.h | 21 +- drivers/infiniband/hw/usnic/usnic_uiom.c | 2 +- drivers/infiniband/hw/usnic/usnic_uiom.h | 21 +- .../infiniband/hw/usnic/usnic_uiom_interval_tree.c | 21 +- .../infiniband/hw/usnic/usnic_uiom_interval_tree.h | 21 +- drivers/infiniband/hw/usnic/usnic_vnic.c | 21 +- drivers/infiniband/hw/usnic/usnic_vnic.h | 21 +- drivers/infiniband/ulp/ipoib/ipoib.h | 6 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4 +- drivers/infiniband/ulp/ipoib/ipoib_main.c | 256 +- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 58 +- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 22 +- drivers/infiniband/ulp/iser/iscsi_iser.c | 82 +- drivers/infiniband/ulp/iser/iscsi_iser.h | 205 +- drivers/infiniband/ulp/iser/iser_initiator.c | 30 +- drivers/infiniband/ulp/iser/iser_memory.c | 490 +- drivers/infiniband/ulp/iser/iser_verbs.c | 360 +- drivers/infiniband/ulp/isert/ib_isert.c | 322 +- drivers/infiniband/ulp/isert/ib_isert.h | 22 +- drivers/infiniband/ulp/srp/ib_srp.c | 270 +- drivers/infiniband/ulp/srp/ib_srp.h | 25 +- drivers/infiniband/ulp/srpt/ib_srpt.c | 22 +- drivers/infiniband/ulp/srpt/ib_srpt.h | 1 - drivers/input/ff-core.c | 5 +- drivers/input/gameport/gameport.c | 4 +- drivers/input/input.c | 10 +- drivers/input/joydev.c | 11 +- drivers/input/joystick/Kconfig | 1 + drivers/input/joystick/analog.c | 4 +- drivers/input/joystick/walkera0701.c | 4 +- drivers/input/joystick/zhenhua.c | 13 +- drivers/input/keyboard/Kconfig | 17 +- drivers/input/keyboard/Makefile | 1 + drivers/input/keyboard/adp5589-keys.c | 1 - drivers/input/keyboard/cap11xx.c | 145 +- drivers/input/keyboard/gpio_keys.c | 9 +- drivers/input/keyboard/gpio_keys_polled.c | 5 +- drivers/input/keyboard/imx_keypad.c | 2 - drivers/input/keyboard/lm8333.c | 1 - drivers/input/keyboard/matrix_keypad.c | 6 +- drivers/input/keyboard/mcs_touchkey.c | 1 - drivers/input/keyboard/mpr121_touchkey.c | 1 - drivers/input/keyboard/omap4-keypad.c | 2 +- drivers/input/keyboard/pmic8xxx-keypad.c | 10 +- drivers/input/keyboard/qt1070.c | 1 - drivers/input/keyboard/qt2160.c | 1 - drivers/input/keyboard/samsung-keypad.c | 6 +- drivers/input/keyboard/snvs_pwrkey.c | 227 + drivers/input/keyboard/tc3589x-keypad.c | 63 +- drivers/input/keyboard/tca8418_keypad.c | 1 - drivers/input/misc/Kconfig | 29 +- drivers/input/misc/Makefile | 1 - drivers/input/misc/ab8500-ponkey.c | 1 + drivers/input/misc/adxl34x-i2c.c | 1 - drivers/input/misc/arizona-haptics.c | 26 +- drivers/input/misc/bma150.c | 8 +- drivers/input/misc/cma3000_d0x_i2c.c | 1 - drivers/input/misc/drv260x.c | 9 +- 
drivers/input/misc/drv2665.c | 5 +- drivers/input/misc/drv2667.c | 7 +- drivers/input/misc/gp2ap002a00f.c | 2 +- drivers/input/misc/kxtj9.c | 1 - drivers/input/misc/max77693-haptic.c | 92 +- drivers/input/misc/max77843-haptic.c | 358 - drivers/input/misc/max8997_haptic.c | 3 +- drivers/input/misc/mpu3050.c | 1 - drivers/input/misc/pcf8574_keypad.c | 1 - drivers/input/misc/pm8941-pwrkey.c | 2 +- drivers/input/misc/pmic8xxx-pwrkey.c | 268 +- drivers/input/misc/pwm-beeper.c | 1 + drivers/input/misc/rb532_button.c | 1 + drivers/input/misc/regulator-haptic.c | 1 + drivers/input/misc/sparcspkr.c | 2 + drivers/input/misc/uinput.c | 4 +- drivers/input/misc/xen-kbdfront.c | 4 +- drivers/input/mouse/Kconfig | 2 +- drivers/input/mouse/Makefile | 2 +- drivers/input/mouse/alps.c | 48 +- drivers/input/mouse/cyapa.c | 183 +- drivers/input/mouse/cyapa.h | 157 +- drivers/input/mouse/cyapa_gen3.c | 15 +- drivers/input/mouse/cyapa_gen5.c | 1255 ++- drivers/input/mouse/cyapa_gen6.c | 745 ++ drivers/input/mouse/elan_i2c.h | 2 +- drivers/input/mouse/elan_i2c_core.c | 54 +- drivers/input/mouse/elan_i2c_i2c.c | 4 +- drivers/input/mouse/elan_i2c_smbus.c | 4 +- drivers/input/mouse/psmouse-base.c | 4 + drivers/input/mouse/sentelic.c | 14 +- drivers/input/mouse/synaptics_i2c.c | 1 - drivers/input/serio/ambakmi.c | 8 +- drivers/input/serio/i8042.c | 45 +- drivers/input/serio/i8042.h | 13 + drivers/input/serio/libps2.c | 22 +- drivers/input/serio/parkbd.c | 1 + drivers/input/serio/serio.c | 5 +- drivers/input/touchscreen/Kconfig | 46 +- drivers/input/touchscreen/Makefile | 4 +- drivers/input/touchscreen/ad7879-i2c.c | 1 - drivers/input/touchscreen/ads7846.c | 11 +- drivers/input/touchscreen/ar1021_i2c.c | 1 - drivers/input/touchscreen/atmel_mxt_ts.c | 240 +- drivers/input/touchscreen/auo-pixcir-ts.c | 1 - drivers/input/touchscreen/bu21013_ts.c | 1 - drivers/input/touchscreen/chipone_icn8318.c | 1 - drivers/input/touchscreen/colibri-vf50-ts.c | 386 + drivers/input/touchscreen/cy8ctmg110_ts.c | 1 - drivers/input/touchscreen/cyttsp4_i2c.c | 2 - drivers/input/touchscreen/cyttsp_i2c.c | 2 - drivers/input/touchscreen/edt-ft5x06.c | 3 +- drivers/input/touchscreen/egalax_ts.c | 2 +- drivers/input/touchscreen/elants_i2c.c | 188 +- drivers/input/touchscreen/goodix.c | 2 +- drivers/input/touchscreen/ili210x.c | 5 +- drivers/input/touchscreen/imx6ul_tsc.c | 533 + drivers/input/touchscreen/lpc32xx_ts.c | 4 +- drivers/input/touchscreen/max11801_ts.c | 1 - drivers/input/touchscreen/mms114.c | 6 +- drivers/input/touchscreen/of_touchscreen.c | 68 +- drivers/input/touchscreen/pixcir_i2c_ts.c | 147 +- drivers/input/touchscreen/st1232.c | 1 - drivers/input/touchscreen/sun4i-ts.c | 8 +- drivers/input/touchscreen/sur40.c | 1 + drivers/input/touchscreen/tsc2005.c | 264 +- drivers/input/touchscreen/tsc2007.c | 1 - drivers/input/touchscreen/wacom_i2c.c | 1 - drivers/input/touchscreen/wdt87xx_i2c.c | 49 +- drivers/input/touchscreen/wm97xx-core.c | 13 +- drivers/input/touchscreen/zforce_ts.c | 98 +- drivers/iommu/Kconfig | 6 +- drivers/iommu/amd_iommu.c | 34 +- drivers/iommu/amd_iommu_init.c | 5 +- drivers/iommu/amd_iommu_types.h | 1 + drivers/iommu/amd_iommu_v2.c | 11 +- drivers/iommu/arm-smmu-v3.c | 87 +- drivers/iommu/arm-smmu.c | 45 +- drivers/iommu/dmar.c | 2 +- drivers/iommu/intel-iommu.c | 767 +- drivers/iommu/intel_irq_remapping.c | 6 +- drivers/iommu/io-pgtable-arm.c | 128 +- drivers/iommu/io-pgtable.c | 5 - drivers/iommu/io-pgtable.h | 14 +- drivers/iommu/iommu.c | 2 +- drivers/iommu/iova.c | 120 +- drivers/iommu/ipmmu-vmsa.c | 19 +- 
drivers/iommu/irq_remapping.c | 2 +- drivers/iommu/msm_iommu.c | 4 +- drivers/iommu/of_iommu.c | 8 +- drivers/iommu/omap-iommu-debug.c | 132 +- drivers/iommu/omap-iommu.c | 198 +- drivers/iommu/omap-iommu.h | 79 +- drivers/iommu/omap-iopgtable.h | 27 +- drivers/iommu/tegra-smmu.c | 297 +- drivers/irqchip/Kconfig | 10 + drivers/irqchip/Makefile | 5 +- drivers/irqchip/exynos-combiner.c | 22 +- drivers/irqchip/irq-armada-370-xp.c | 12 +- drivers/irqchip/irq-atmel-aic.c | 4 +- drivers/irqchip/irq-atmel-aic5.c | 4 +- drivers/irqchip/irq-bcm2835.c | 113 +- drivers/irqchip/irq-bcm2836.c | 275 + drivers/irqchip/irq-bcm7038-l1.c | 9 +- drivers/irqchip/irq-bcm7120-l2.c | 78 +- drivers/irqchip/irq-brcmstb-l2.c | 11 +- drivers/irqchip/irq-clps711x.c | 9 +- drivers/irqchip/irq-crossbar.c | 3 +- drivers/irqchip/irq-digicolor.c | 3 +- drivers/irqchip/irq-dw-apb-ictl.c | 58 +- drivers/irqchip/irq-gic-v2m.c | 56 +- drivers/irqchip/irq-gic-v3-its-pci-msi.c | 140 + drivers/irqchip/irq-gic-v3-its-platform-msi.c | 93 + drivers/irqchip/irq-gic-v3-its.c | 147 +- drivers/irqchip/irq-gic-v3.c | 96 +- drivers/irqchip/irq-gic.c | 252 +- drivers/irqchip/irq-hip04.c | 10 +- drivers/irqchip/irq-i8259.c | 384 + drivers/irqchip/irq-imgpdc.c | 13 +- drivers/irqchip/irq-imx-gpcv2.c | 278 + drivers/irqchip/irq-ingenic.c | 3 +- drivers/irqchip/irq-keystone.c | 8 +- drivers/irqchip/irq-metag-ext.c | 11 +- drivers/irqchip/irq-metag.c | 5 +- drivers/irqchip/irq-mips-cpu.c | 3 +- drivers/irqchip/irq-mips-gic.c | 176 +- drivers/irqchip/irq-mmp.c | 9 +- drivers/irqchip/irq-moxart.c | 3 +- drivers/irqchip/irq-mtk-sysirq.c | 3 +- drivers/irqchip/irq-mxs.c | 4 +- drivers/irqchip/irq-nvic.c | 3 +- drivers/irqchip/irq-omap-intc.c | 38 +- drivers/irqchip/irq-or1k-pic.c | 3 +- drivers/irqchip/irq-orion.c | 11 +- drivers/irqchip/irq-renesas-h8300h.c | 2 - drivers/irqchip/irq-renesas-h8s.c | 2 +- drivers/irqchip/irq-renesas-intc-irqpin.c | 11 +- drivers/irqchip/irq-renesas-irqc.c | 26 +- drivers/irqchip/irq-s3c24xx.c | 25 +- drivers/irqchip/irq-sa11x0.c | 1 - drivers/irqchip/irq-sirfsoc.c | 50 +- drivers/irqchip/irq-sun4i.c | 5 +- drivers/irqchip/irq-sunxi-nmi.c | 9 +- drivers/irqchip/irq-tb10x.c | 9 +- drivers/irqchip/irq-tegra.c | 4 +- drivers/irqchip/irq-versatile-fpga.c | 16 +- drivers/irqchip/irq-vf610-mscm-ir.c | 3 +- drivers/irqchip/irq-vic.c | 11 +- drivers/irqchip/irq-vt8500.c | 10 +- drivers/irqchip/irq-xtensa-mx.c | 3 +- drivers/irqchip/irq-xtensa-pic.c | 3 +- drivers/irqchip/irq-zevio.c | 3 +- drivers/irqchip/irqchip.h | 11 - drivers/irqchip/spear-shirq.c | 8 +- drivers/isdn/hisax/isdnl2.c | 20 +- drivers/isdn/icn/icn.h | 2 +- drivers/isdn/mISDN/dsp_audio.c | 22 +- drivers/isdn/mISDN/dsp_cmx.c | 2 +- drivers/isdn/mISDN/layer2.c | 54 +- drivers/leds/Kconfig | 39 +- drivers/leds/Makefile | 2 +- drivers/leds/leds-aat1290.c | 3 +- drivers/leds/leds-bcm6328.c | 1 + drivers/leds/leds-bcm6358.c | 1 + drivers/leds/leds-fsg.c | 52 +- drivers/leds/leds-ktd2692.c | 1 + drivers/leds/leds-lm3530.c | 1 - drivers/leds/leds-lm355x.c | 1 - drivers/leds/leds-lm3642.c | 1 - drivers/leds/leds-lp5521.c | 11 +- drivers/leds/leds-lp5523.c | 11 +- drivers/leds/leds-lp5562.c | 11 +- drivers/leds/leds-lp55xx-common.c | 13 +- drivers/leds/leds-lp55xx-common.h | 4 +- drivers/leds/leds-lp8501.c | 11 +- drivers/leds/leds-lp8860.c | 4 +- drivers/leds/leds-max77693.c | 2 + drivers/leds/leds-ns2.c | 170 +- drivers/leds/leds-pca955x.c | 1 - drivers/leds/leds-pca963x.c | 2 +- drivers/leds/leds-pm8941-wled.c | 435 - drivers/leds/leds-powernv.c | 345 + 
drivers/leds/leds-syscon.c | 4 +- drivers/leds/leds-tca6507.c | 2 +- drivers/leds/leds-tlc591xx.c | 4 - drivers/leds/trigger/Kconfig | 2 +- drivers/macintosh/therm_windtunnel.c | 2 + drivers/macintosh/windfarm.h | 4 - drivers/macintosh/windfarm_core.c | 45 +- drivers/mailbox/Kconfig | 1 + drivers/mailbox/arm_mhu.c | 4 +- drivers/mailbox/bcm2835-mailbox.c | 1 - drivers/mailbox/mailbox.c | 27 +- drivers/mailbox/pcc.c | 8 +- drivers/mcb/mcb-pci.c | 6 +- drivers/md/Kconfig | 4 +- drivers/md/bcache/bcache.h | 18 - drivers/md/bcache/btree.c | 10 +- drivers/md/bcache/closure.h | 2 +- drivers/md/bcache/io.c | 101 +- drivers/md/bcache/journal.c | 12 +- drivers/md/bcache/movinggc.c | 8 +- drivers/md/bcache/request.c | 43 +- drivers/md/bcache/super.c | 48 +- drivers/md/bcache/util.h | 5 +- drivers/md/bcache/writeback.c | 14 +- drivers/md/dm-bio-prison.c | 6 +- drivers/md/dm-bufio.c | 26 +- drivers/md/dm-cache-metadata.c | 2 +- drivers/md/dm-cache-policy-smq.c | 110 +- drivers/md/dm-cache-target.c | 80 +- drivers/md/dm-crypt.c | 30 +- drivers/md/dm-delay.c | 16 +- drivers/md/dm-era-target.c | 15 - drivers/md/dm-exception-store.c | 6 +- drivers/md/dm-exception-store.h | 5 +- drivers/md/dm-flakey.c | 24 +- drivers/md/dm-io.c | 8 +- drivers/md/dm-ioctl.c | 4 +- drivers/md/dm-linear.c | 23 +- drivers/md/dm-log-writes.c | 38 +- drivers/md/dm-mpath.c | 27 +- drivers/md/dm-raid.c | 19 - drivers/md/dm-raid1.c | 32 +- drivers/md/dm-snap-persistent.c | 32 +- drivers/md/dm-snap-transient.c | 3 +- drivers/md/dm-snap.c | 49 +- drivers/md/dm-stripe.c | 31 +- drivers/md/dm-table.c | 21 - drivers/md/dm-thin.c | 161 +- drivers/md/dm-verity.c | 42 +- drivers/md/dm-zero.c | 2 +- drivers/md/dm.c | 136 +- drivers/md/dm.h | 2 - drivers/md/faulty.c | 4 +- drivers/md/linear.c | 45 +- drivers/md/md-cluster.c | 159 +- drivers/md/md.c | 164 +- drivers/md/md.h | 12 - drivers/md/multipath.c | 36 +- drivers/md/persistent-data/dm-block-manager.c | 8 +- drivers/md/persistent-data/dm-btree-remove.c | 39 +- drivers/md/persistent-data/dm-btree.c | 8 +- drivers/md/raid0.c | 58 +- drivers/md/raid0.h | 2 - drivers/md/raid1.c | 167 +- drivers/md/raid1.h | 5 + drivers/md/raid10.c | 261 +- drivers/md/raid10.h | 6 + drivers/md/raid5.c | 304 +- drivers/md/raid5.h | 5 +- drivers/media/common/saa7146/saa7146_hlp.c | 9 +- drivers/media/dvb-core/dvb_ca_en50221.c | 167 +- drivers/media/dvb-core/dvb_ca_en50221.h | 34 +- drivers/media/dvb-core/dvb_frontend.c | 1 - drivers/media/dvb-core/dvb_frontend.h | 410 +- drivers/media/dvb-core/dvb_math.h | 25 +- drivers/media/dvb-core/dvb_net.c | 2 +- drivers/media/dvb-core/dvb_ringbuffer.h | 135 +- drivers/media/dvb-core/dvbdev.h | 116 +- drivers/media/dvb-frontends/Kconfig | 32 +- drivers/media/dvb-frontends/Makefile | 4 + drivers/media/dvb-frontends/a8293.c | 168 +- drivers/media/dvb-frontends/a8293.h | 22 - drivers/media/dvb-frontends/af9033.c | 1 - drivers/media/dvb-frontends/ascot2e.c | 548 + drivers/media/dvb-frontends/ascot2e.h | 58 + drivers/media/dvb-frontends/au8522_decoder.c | 1 - drivers/media/dvb-frontends/au8522_dig.c | 2 +- drivers/media/dvb-frontends/cx24123.c | 2 +- drivers/media/dvb-frontends/cxd2841er.c | 2727 +++++ drivers/media/dvb-frontends/cxd2841er.h | 65 + drivers/media/dvb-frontends/cxd2841er_priv.h | 43 + drivers/media/dvb-frontends/dvb-pll.c | 50 +- drivers/media/dvb-frontends/horus3a.c | 430 + drivers/media/dvb-frontends/horus3a.h | 58 + drivers/media/dvb-frontends/lnbh25.c | 189 + drivers/media/dvb-frontends/lnbh25.h | 56 + drivers/media/dvb-frontends/m88ds3103.c | 74 +- 
drivers/media/dvb-frontends/rtl2830.c | 1 -
drivers/media/dvb-frontends/rtl2832.c | 1 -
drivers/media/dvb-frontends/rtl2832_sdr.c | 1 -
drivers/media/dvb-frontends/s921.c | 2 +-
drivers/media/dvb-frontends/si2168.c | 5 +-
drivers/media/dvb-frontends/sp2.c | 1 -
drivers/media/dvb-frontends/stv0367.c | 17 +-
drivers/media/dvb-frontends/tda10071.c | 825 +-
drivers/media/dvb-frontends/tda10071.h | 63 +-
drivers/media/dvb-frontends/tda10071_priv.h | 20 +-
drivers/media/dvb-frontends/ts2020.c | 1 -
drivers/media/i2c/Kconfig | 15 +-
drivers/media/i2c/Makefile | 1 +
drivers/media/i2c/adp1653.c | 2 +-
drivers/media/i2c/adv7170.c | 1 -
drivers/media/i2c/adv7175.c | 1 -
drivers/media/i2c/adv7180.c | 12 +-
drivers/media/i2c/adv7343.c | 8 -
drivers/media/i2c/adv7393.c | 7 -
drivers/media/i2c/adv7511.c | 3 +-
drivers/media/i2c/adv7604.c | 486 +-
drivers/media/i2c/adv7842.c | 28 +-
drivers/media/i2c/ak881x.c | 8 +-
drivers/media/i2c/bt819.c | 12 -
drivers/media/i2c/bt856.c | 1 -
drivers/media/i2c/bt866.c | 1 -
drivers/media/i2c/cs5345.c | 8 -
drivers/media/i2c/cs53l32a.c | 1 -
drivers/media/i2c/cx25840/cx25840-core.c | 1 -
drivers/media/i2c/ir-kbd-i2c.c | 1 +
drivers/media/i2c/ks0127.c | 1 -
drivers/media/i2c/m52790.c | 1 -
drivers/media/i2c/msp3400-driver.c | 1 -
drivers/media/i2c/mt9v011.c | 1 -
drivers/media/i2c/mt9v032.c | 2 +-
drivers/media/i2c/ov2659.c | 4 -
drivers/media/i2c/ov7640.c | 1 -
drivers/media/i2c/ov7670.c | 1 -
drivers/media/i2c/ov9650.c | 2 +-
drivers/media/i2c/s5c73m3/s5c73m3-spi.c | 1 -
drivers/media/i2c/s5k6a3.c | 1 +
drivers/media/i2c/saa6588.c | 5 +-
drivers/media/i2c/saa6752hs.c | 1 -
drivers/media/i2c/saa7110.c | 12 -
drivers/media/i2c/saa7115.c | 1 -
drivers/media/i2c/saa7127.c | 1 -
drivers/media/i2c/saa717x.c | 8 -
drivers/media/i2c/saa7185.c | 1 -
drivers/media/i2c/soc_camera/mt9t112.c | 8 +-
drivers/media/i2c/soc_camera/tw9910.c | 35 +-
drivers/media/i2c/sony-btf-mpx.c | 1 -
drivers/media/i2c/sr030pc30.c | 15 +-
drivers/media/i2c/tc358743.c | 1979 ++++
drivers/media/i2c/tc358743_regs.h | 681 ++
drivers/media/i2c/tda7432.c | 8 -
drivers/media/i2c/tda9840.c | 1 -
drivers/media/i2c/tea6415c.c | 1 -
drivers/media/i2c/tea6420.c | 1 -
drivers/media/i2c/ths7303.c | 1 -
drivers/media/i2c/tlv320aic23b.c | 7 -
drivers/media/i2c/tvaudio.c | 1 -
drivers/media/i2c/tvp514x.c | 11 -
drivers/media/i2c/tvp5150.c | 1 -
drivers/media/i2c/tvp7002.c | 7 -
drivers/media/i2c/tw9903.c | 1 -
drivers/media/i2c/tw9906.c | 1 -
drivers/media/i2c/upd64031a.c | 1 -
drivers/media/i2c/upd64083.c | 1 -
drivers/media/i2c/vp27smpx.c | 1 -
drivers/media/i2c/vpx3220.c | 8 -
drivers/media/i2c/wm8739.c | 8 -
drivers/media/i2c/wm8775.c | 1 -
drivers/media/media-entity.c | 6 +-
drivers/media/pci/Kconfig | 7 +-
drivers/media/pci/Makefile | 3 +-
drivers/media/pci/bt8xx/btcx-risc.c | 5 +-
drivers/media/pci/bt8xx/bttv-input.c | 21 +-
drivers/media/pci/bt8xx/bttvp.h | 2 +-
drivers/media/pci/cobalt/Kconfig | 3 +-
drivers/media/pci/cobalt/cobalt-driver.c | 11 +-
drivers/media/pci/cobalt/cobalt-v4l2.c | 11 +-
drivers/media/pci/ivtv/ivtv-gpio.c | 7 -
drivers/media/pci/mantis/mantis_dma.c | 4 +-
drivers/media/pci/netup_unidvb/Kconfig | 12 +
drivers/media/pci/netup_unidvb/Makefile | 9 +
drivers/media/pci/netup_unidvb/netup_unidvb.h | 133 +
drivers/media/pci/netup_unidvb/netup_unidvb_ci.c | 248 +
drivers/media/pci/netup_unidvb/netup_unidvb_core.c | 1001 ++
drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c | 381 +
drivers/media/pci/netup_unidvb/netup_unidvb_spi.c | 248 +
drivers/media/pci/smipcie/Kconfig | 1 +
drivers/media/pci/smipcie/Makefile | 3 +
drivers/media/pci/smipcie/smipcie-ir.c | 232 +
drivers/media/pci/smipcie/smipcie-main.c | 1114 ++
drivers/media/pci/smipcie/smipcie.c | 1102 --
drivers/media/pci/smipcie/smipcie.h | 19 +
drivers/media/pci/solo6x10/solo6x10-core.c | 18 +-
drivers/media/pci/solo6x10/solo6x10-g723.c | 13 +-
drivers/media/pci/solo6x10/solo6x10.h | 26 +-
drivers/media/pci/ttpci/budget-av.c | 2 +-
drivers/media/pci/ttpci/ttpci-eeprom.c | 9 +-
drivers/media/pci/tw68/tw68-core.c | 21 +-
drivers/media/pci/tw68/tw68.h | 16 -
drivers/media/pci/zoran/zoran.h | 7 +-
drivers/media/pci/zoran/zoran_card.c | 11 +-
drivers/media/pci/zoran/zoran_device.c | 18 +-
drivers/media/pci/zoran/zoran_driver.c | 344 +-
drivers/media/platform/Kconfig | 27 +-
drivers/media/platform/Makefile | 2 +
drivers/media/platform/coda/Makefile | 2 +-
drivers/media/platform/coda/coda-bit.c | 147 +-
drivers/media/platform/coda/coda-common.c | 338 +-
drivers/media/platform/coda/coda-gdi.c | 150 +
drivers/media/platform/coda/coda.h | 15 +-
drivers/media/platform/coda/coda_regs.h | 10 +
drivers/media/platform/coda/trace.h | 89 +-
drivers/media/platform/exynos4-is/fimc-m2m.c | 2 +-
drivers/media/platform/fsl-viu.c | 160 +-
drivers/media/platform/omap/Kconfig | 1 +
drivers/media/platform/omap/omap_vout.c | 71 +-
drivers/media/platform/omap3isp/isp.c | 133 +-
drivers/media/platform/omap3isp/isp.h | 7 +-
drivers/media/platform/omap3isp/ispcsiphy.h | 2 +-
drivers/media/platform/omap3isp/ispvideo.c | 9 +-
drivers/media/platform/omap3isp/omap3isp.h | 132 +
drivers/media/platform/rcar_jpu.c | 1794 +++
drivers/media/platform/s5p-jpeg/jpeg-core.c | 14 +-
drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c | 6 +-
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 9 +-
drivers/media/platform/s5p-mfc/s5p_mfc_opr.c | 11 +-
drivers/media/platform/s5p-mfc/s5p_mfc_opr.h | 2 +-
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c | 12 +-
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c | 10 +-
drivers/media/platform/s5p-tv/hdmiphy_drv.c | 1 -
drivers/media/platform/s5p-tv/mixer_reg.c | 12 +-
drivers/media/platform/s5p-tv/sii9234_drv.c | 1 -
drivers/media/platform/sh_veu.c | 10 +-
drivers/media/platform/sh_vou.c | 817 +-
drivers/media/platform/soc_camera/atmel-isi.c | 105 +-
drivers/media/platform/soc_camera/rcar_vin.c | 16 +-
.../platform/soc_camera/sh_mobile_ceu_camera.c | 3 +
drivers/media/platform/soc_camera/soc_camera.c | 48 +-
drivers/media/platform/sti/bdisp/bdisp-debug.c | 8 +
drivers/media/platform/sti/bdisp/bdisp-hw.c | 12 +-
drivers/media/platform/sti/bdisp/bdisp-v4l2.c | 76 +-
drivers/media/platform/sti/c8sectpfe/Kconfig | 28 +
drivers/media/platform/sti/c8sectpfe/Makefile | 9 +
.../platform/sti/c8sectpfe/c8sectpfe-common.c | 265 +
.../platform/sti/c8sectpfe/c8sectpfe-common.h | 64 +
.../media/platform/sti/c8sectpfe/c8sectpfe-core.c | 1235 +++
.../media/platform/sti/c8sectpfe/c8sectpfe-core.h | 288 +
.../platform/sti/c8sectpfe/c8sectpfe-debugfs.c | 271 +
.../platform/sti/c8sectpfe/c8sectpfe-debugfs.h | 26 +
.../media/platform/sti/c8sectpfe/c8sectpfe-dvb.c | 244 +
.../media/platform/sti/c8sectpfe/c8sectpfe-dvb.h | 20 +
drivers/media/platform/vivid/vivid-vid-cap.c | 2 +-
drivers/media/platform/vivid/vivid-vid-out.c | 15 +-
drivers/media/platform/vsp1/vsp1_drv.c | 13 +-
drivers/media/platform/vsp1/vsp1_entity.c | 18 +-
drivers/media/platform/vsp1/vsp1_entity.h | 4 +-
drivers/media/platform/vsp1/vsp1_regs.h | 6 +-
drivers/media/platform/vsp1/vsp1_rwpf.c | 11 +
drivers/media/platform/vsp1/vsp1_video.c | 85 +-
drivers/media/platform/vsp1/vsp1_video.h | 5 +-
drivers/media/radio/radio-tea5764.c | 1 -
drivers/media/radio/saa7706h.c | 17 +-
drivers/media/radio/tef6862.c | 1 -
drivers/media/radio/wl128x/fmdrv_common.c | 5 +-
drivers/media/rc/Kconfig | 26 +-
drivers/media/rc/ir-hix5hd2.c | 2 +-
drivers/media/rc/ir-lirc-codec.c | 5 +-
drivers/media/rc/keymaps/rc-lirc.c | 2 +-
drivers/media/rc/keymaps/rc-lme2510.c | 132 +-
drivers/media/rc/rc-ir-raw.c | 2 +-
drivers/media/rc/rc-main.c | 71 +-
drivers/media/tuners/Kconfig | 2 +-
drivers/media/tuners/e4000.c | 1 -
drivers/media/tuners/fc2580.c | 1 -
drivers/media/tuners/it913x.c | 1 -
drivers/media/tuners/m88rs6000t.c | 1 -
drivers/media/tuners/si2157.c | 5 +-
drivers/media/tuners/tda18212.c | 1 -
drivers/media/tuners/tua9001.c | 1 -
drivers/media/usb/airspy/airspy.c | 3 -
drivers/media/usb/cx231xx/cx231xx-video.c | 4 +-
drivers/media/usb/dvb-usb-v2/lmedm04.c | 21 +-
drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 15 +-
drivers/media/usb/dvb-usb-v2/rtl28xxu.h | 2 +-
drivers/media/usb/dvb-usb/pctv452e.c | 2 +-
drivers/media/usb/dvb-usb/technisat-usb2.c | 2 +-
drivers/media/usb/em28xx/em28xx-dvb.c | 4 -
drivers/media/usb/go7007/s2250-board.c | 1 -
drivers/media/usb/gspca/m5602/m5602_s5k83a.c | 2 +-
drivers/media/usb/gspca/sn9c2028.c | 2 +-
drivers/media/usb/stk1160/stk1160-core.c | 5 +-
drivers/media/usb/stk1160/stk1160-reg.h | 34 +
drivers/media/usb/stk1160/stk1160-v4l.c | 219 +-
drivers/media/usb/stk1160/stk1160-video.c | 4 +-
drivers/media/usb/stk1160/stk1160.h | 4 +-
drivers/media/usb/ttusb-dec/ttusb_dec.c | 9 +-
drivers/media/usb/usbvision/usbvision-core.c | 71 +-
drivers/media/usb/usbvision/usbvision-i2c.c | 2 +-
drivers/media/usb/usbvision/usbvision-video.c | 246 +-
drivers/media/usb/usbvision/usbvision.h | 10 +-
drivers/media/v4l2-core/Kconfig | 3 +-
drivers/media/v4l2-core/Makefile | 3 +
drivers/media/v4l2-core/tuner-core.c | 1 -
drivers/media/v4l2-core/v4l2-ctrls.c | 15 -
drivers/media/v4l2-core/v4l2-dv-timings.c | 98 +-
drivers/media/v4l2-core/v4l2-event.c | 3 +
drivers/media/v4l2-core/v4l2-ioctl.c | 6 +-
drivers/media/v4l2-core/v4l2-mem2mem.c | 21 +-
drivers/media/v4l2-core/v4l2-subdev.c | 18 +
drivers/media/v4l2-core/v4l2-trace.c | 11 +
drivers/media/v4l2-core/videobuf2-core.c | 13 +-
drivers/media/v4l2-core/videobuf2-dma-contig.c | 207 +-
drivers/media/v4l2-core/videobuf2-dma-sg.c | 91 +-
drivers/media/v4l2-core/videobuf2-memops.c | 148 +-
drivers/media/v4l2-core/videobuf2-vmalloc.c | 90 +-
drivers/memory/Kconfig | 20 +-
drivers/memory/Makefile | 1 +
drivers/memory/fsl_ifc.c | 43 +-
drivers/memory/omap-gpmc.c | 7 +-
drivers/memory/pl172.c | 301 +
drivers/memory/tegra/Makefile | 1 +
drivers/memory/tegra/mc.c | 8 +-
drivers/memory/tegra/mc.h | 4 +
drivers/memory/tegra/tegra114.c | 18 +-
drivers/memory/tegra/tegra124-emc.c | 42 +-
drivers/memory/tegra/tegra124.c | 34 +-
drivers/memory/tegra/tegra210.c | 1080 ++
drivers/memory/tegra/tegra30.c | 18 +-
drivers/message/fusion/mptctl.c | 9 +
drivers/mfd/88pm800.c | 1 -
drivers/mfd/88pm805.c | 1 -
drivers/mfd/88pm860x-core.c | 5 -
drivers/mfd/Kconfig | 49 +-
drivers/mfd/Makefile | 9 +-
drivers/mfd/aat2870-core.c | 1 -
drivers/mfd/ab3100-core.c | 1 -
drivers/mfd/ab8500-core.c | 4 -
drivers/mfd/adp5520.c | 1 -
drivers/mfd/arizona-core.c | 140 +-
drivers/mfd/arizona-i2c.c | 9 +-
drivers/mfd/arizona-irq.c | 16 +-
drivers/mfd/arizona.h | 5 +
drivers/mfd/as3711.c | 1 -
drivers/mfd/as3722.c | 1 -
drivers/mfd/asic3.c | 6 +-
drivers/mfd/atmel-hlcdc.c | 55 +-
drivers/mfd/axp20x.c | 112 +-
drivers/mfd/bcm590xx.c | 1 -
drivers/mfd/cros_ec_i2c.c | 1 -
drivers/mfd/cros_ec_spi.c | 7 +
drivers/mfd/da903x.c | 1 -
drivers/mfd/da9052-i2c.c | 1 -
drivers/mfd/da9055-i2c.c | 1 -
drivers/mfd/da9062-core.c | 533 +
drivers/mfd/da9063-i2c.c | 1 -
drivers/mfd/da9063-irq.c | 4 +
drivers/mfd/db8500-prcmu.c | 1 -
drivers/mfd/ezx-pcap.c | 13 +-
drivers/mfd/htc-egpio.c | 10 +-
drivers/mfd/htc-i2cpld.c | 6 +-
drivers/mfd/intel-lpss-acpi.c | 84 +
drivers/mfd/intel-lpss-pci.c | 113 +
drivers/mfd/intel-lpss.c | 524 +
drivers/mfd/intel-lpss.h | 64 +
drivers/mfd/intel_soc_pmic_core.c | 32 +-
drivers/mfd/intel_soc_pmic_crc.c | 3 +
drivers/mfd/ipaq-micro.c | 38 +-
drivers/mfd/jz4740-adc.c | 11 +-
drivers/mfd/kempld-core.c | 16 +
drivers/mfd/lm3533-core.c | 1 -
drivers/mfd/lp3943.c | 1 -
drivers/mfd/lp8788-irq.c | 5 -
drivers/mfd/lp8788.c | 1 -
drivers/mfd/lpc_ich.c | 32 +-
drivers/mfd/max14577.c | 1 -
drivers/mfd/max77686.c | 1 -
drivers/mfd/max77693.c | 32 +-
drivers/mfd/max77843.c | 20 +-
drivers/mfd/max8907.c | 1 -
drivers/mfd/max8925-core.c | 5 +-
drivers/mfd/max8925-i2c.c | 1 -
drivers/mfd/max8997-irq.c | 20 +-
drivers/mfd/max8997.c | 1 -
drivers/mfd/max8998-irq.c | 14 +-
drivers/mfd/max8998.c | 1 -
drivers/mfd/mc13xxx-i2c.c | 1 -
drivers/mfd/mfd-core.c | 2 +-
drivers/mfd/mt6397-core.c | 61 +-
drivers/mfd/palmas.c | 1 -
drivers/mfd/pm8921-core.c | 54 +-
drivers/mfd/qcom_rpm.c | 1 +
drivers/mfd/rc5t583-irq.c | 4 +-
drivers/mfd/rc5t583.c | 1 -
drivers/mfd/retu-mfd.c | 1 -
drivers/mfd/rt5033.c | 1 +
drivers/mfd/sec-core.c | 1 -
drivers/mfd/si476x-i2c.c | 1 -
drivers/mfd/smsc-ece1099.c | 1 -
drivers/mfd/stmpe-i2c.c | 1 -
drivers/mfd/stmpe-spi.c | 13 +
drivers/mfd/stmpe.c | 7 -
drivers/mfd/stw481x.c | 1 +
drivers/mfd/t7l66xb.c | 20 +-
drivers/mfd/tc3589x.c | 8 -
drivers/mfd/tc6393xb.c | 16 +-
drivers/mfd/tps6507x.c | 1 -
drivers/mfd/tps65090.c | 1 -
drivers/mfd/tps65217.c | 2 +-
drivers/mfd/tps65218.c | 2 +-
drivers/mfd/tps6586x.c | 12 +-
drivers/mfd/tps65910.c | 1 -
drivers/mfd/tps65912-i2c.c | 1 -
drivers/mfd/tps65912-irq.c | 8 +-
drivers/mfd/tps80031.c | 1 -
drivers/mfd/twl-core.c | 9 +-
drivers/mfd/twl4030-irq.c | 11 +-
drivers/mfd/twl6030-irq.c | 15 +-
drivers/mfd/twl6040.c | 5 +-
drivers/mfd/ucb1x00-core.c | 6 +-
drivers/mfd/wm5102-tables.c | 57 +-
drivers/mfd/wm5110-tables.c | 36 +-
drivers/mfd/wm831x-i2c.c | 1 -
drivers/mfd/wm831x-irq.c | 7 -
drivers/mfd/wm8350-i2c.c | 1 -
drivers/mfd/wm8350-irq.c | 8 +-
drivers/mfd/wm8400-core.c | 1 -
drivers/mfd/wm8994-core.c | 9 +-
drivers/mfd/wm8994-irq.c | 9 +-
drivers/mfd/wm8994-regmap.c | 6 +-
drivers/mfd/wm8997-tables.c | 10 +-
drivers/mfd/wm8998-tables.c | 1594 +++
drivers/misc/Kconfig | 10 +
drivers/misc/Makefile | 1 +
drivers/misc/ad525x_dpot-i2c.c | 1 -
drivers/misc/apds990x.c | 1 -
drivers/misc/bh1770glc.c | 1 -
drivers/misc/bmp085-i2c.c | 1 -
drivers/misc/cxl/Kconfig | 7 +-
drivers/misc/cxl/Makefile | 2 +
drivers/misc/cxl/api.c | 58 +-
drivers/misc/cxl/context.c | 25 +-
drivers/misc/cxl/cxl.h | 95 +-
drivers/misc/cxl/debugfs.c | 2 +-
drivers/misc/cxl/file.c | 40 +-
drivers/misc/cxl/irq.c | 52 +-
drivers/misc/cxl/main.c | 1 +
drivers/misc/cxl/native.c | 121 +-
drivers/misc/cxl/pci.c | 618 +-
drivers/misc/cxl/sysfs.c | 35 +-
drivers/misc/cxl/trace.h | 10 +-
drivers/misc/cxl/vphb.c | 40 +
drivers/misc/ds1682.c | 12 -
drivers/misc/eeprom/Kconfig | 13 -
drivers/misc/eeprom/Makefile | 1 -
drivers/misc/eeprom/at24.c | 38 +-
drivers/misc/eeprom/eeprom.c | 5 -
drivers/misc/eeprom/eeprom_93xx46.c | 14 -
drivers/misc/eeprom/max6875.c | 7 +-
drivers/misc/eeprom/sunxi_sid.c | 156 -
drivers/misc/genwqe/card_dev.c | 2 +-
drivers/misc/isl29003.c | 1 -
drivers/misc/lis3lv02d/lis3lv02d_i2c.c | 1 -
drivers/misc/mei/Makefile | 2 +-
drivers/misc/mei/bus-fixup.c | 306 +
drivers/misc/mei/bus.c | 1008 +-
drivers/misc/mei/client.c | 333 +-
drivers/misc/mei/client.h | 8 +
drivers/misc/mei/debugfs.c | 9 +-
drivers/misc/mei/hbm.c | 330 +-
drivers/misc/mei/hbm.h | 3 +
drivers/misc/mei/hw-me-regs.h | 27 +-
drivers/misc/mei/hw-me.c | 499 +-
drivers/misc/mei/hw-me.h | 8 +-
drivers/misc/mei/hw.h | 134 +-
drivers/misc/mei/init.c | 3 +-
drivers/misc/mei/interrupt.c | 27 +-
drivers/misc/mei/main.c | 96 +
drivers/misc/mei/mei_dev.h | 47 +-
drivers/misc/mei/nfc.c | 415 -
drivers/misc/mei/pci-me.c | 32 +-
drivers/misc/mei/wd.c | 1 +
drivers/misc/qcom-coincell.c | 152 +
drivers/misc/sgi-xp/xpc_uv.c | 2 +-
drivers/misc/sram.c | 8 +-
drivers/misc/ti-st/st_kim.c | 105 +-
drivers/misc/ti-st/st_ll.c | 17 +-
drivers/misc/tsl2550.c | 1 -
drivers/misc/vmw_balloon.c | 170 +-
drivers/misc/vmw_vmci/vmci_host.c | 7 +-
drivers/mmc/card/block.c | 17 +
drivers/mmc/card/mmc_test.c | 9 +-
drivers/mmc/card/queue.c | 6 +-
drivers/mmc/core/core.c | 40 +-
drivers/mmc/core/host.c | 42 +-
drivers/mmc/core/mmc.c | 7 -
drivers/mmc/host/Kconfig | 8 +
drivers/mmc/host/Makefile | 1 +
drivers/mmc/host/android-goldfish.c | 2 +-
drivers/mmc/host/atmel-mci.c | 1 -
drivers/mmc/host/dw_mmc-rockchip.c | 3 +
drivers/mmc/host/dw_mmc.c | 179 +-
drivers/mmc/host/omap.c | 9 +-
drivers/mmc/host/omap_hsmmc.c | 347 +-
drivers/mmc/host/pxamci.c | 266 +-
drivers/mmc/host/sdhci-esdhc-imx.c | 114 +-
drivers/mmc/host/sdhci-esdhc.h | 3 +-
drivers/mmc/host/sdhci-msm.c | 5 +
drivers/mmc/host/sdhci-of-arasan.c | 4 +
drivers/mmc/host/sdhci-of-at91.c | 192 +
drivers/mmc/host/sdhci-sirf.c | 4 +-
drivers/mmc/host/sdhci.c | 54 +-
drivers/mmc/host/sdhci.h | 7 +
drivers/mmc/host/sh_mmcif.c | 2 +
drivers/mmc/host/sunxi-mmc.c | 55 +-
drivers/mmc/host/tmio_mmc_pio.c | 6 +-
drivers/mmc/host/usdhi6rol0.c | 15 +-
drivers/mtd/devices/m25p80.c | 18 +-
drivers/mtd/devices/mtd_dataflash.c | 1 +
drivers/mtd/devices/slram.c | 2 +-
drivers/mtd/maps/nettel.c | 13 +-
drivers/mtd/maps/physmap_of.c | 6 +
drivers/mtd/mtd_blkdevs.c | 10 +-
drivers/mtd/nand/Kconfig | 13 +-
drivers/mtd/nand/Makefile | 3 +-
drivers/mtd/nand/brcmnand/brcmnand.h | 4 +-
drivers/mtd/nand/davinci_nand.c | 42 +-
drivers/mtd/nand/denali_pci.c | 43 +-
drivers/mtd/nand/diskonchip.c | 2 +-
drivers/mtd/nand/fsl_ifc_nand.c | 258 +-
drivers/mtd/nand/mxc_nand.c | 2 +-
drivers/mtd/nand/nand_ids.c | 4 +
drivers/mtd/nand/nandsim.c | 28 +-
drivers/mtd/nand/omap_elm.c | 2 +-
drivers/mtd/nand/pxa3xx_nand.c | 58 +-
drivers/mtd/nand/r852.c | 2 +-
drivers/mtd/nand/sunxi_nand.c | 88 +-
drivers/mtd/onenand/generic.c | 2 +-
drivers/mtd/spi-nor/Kconfig | 14 +-
drivers/mtd/spi-nor/Makefile | 1 +
drivers/mtd/spi-nor/fsl-quadspi.c | 265 +-
drivers/mtd/spi-nor/nxp-spifi.c | 482 +
drivers/mtd/spi-nor/spi-nor.c | 79 +-
drivers/mtd/tests/oobtest.c | 18 +-
drivers/net/Kconfig | 19 +-
drivers/net/Makefile | 3 +
drivers/net/arcnet/arcnet.c | 2 +-
drivers/net/bonding/bond_3ad.c | 2 -
drivers/net/bonding/bond_main.c | 10 +-
drivers/net/bonding/bond_netlink.c | 17 +-
drivers/net/bonding/bond_options.c | 20 +-
drivers/net/bonding/bond_sysfs.c | 20 +-
drivers/net/caif/caif_hsi.c | 2 +-
drivers/net/caif/caif_serial.c | 2 +-
drivers/net/caif/caif_spi.c | 2 +-
drivers/net/can/dev.c | 2 +-
drivers/net/can/flexcan.c | 2 +-
drivers/net/can/sja1000/peak_pci.c | 1 +
drivers/net/can/sja1000/sja1000.c | 3 +
drivers/net/can/usb/gs_usb.c | 8 +-
drivers/net/dsa/Kconfig | 6 +-
drivers/net/dsa/mv88e6123_61_65.c | 1 +
drivers/net/dsa/mv88e6131.c | 1 +
drivers/net/dsa/mv88e6171.c | 12 +-
drivers/net/dsa/mv88e6352.c | 115 +-
drivers/net/dsa/mv88e6xxx.c | 1121 +-
drivers/net/dsa/mv88e6xxx.h | 91 +-
drivers/net/dummy.c | 3 +-
drivers/net/ethernet/3com/3c59x.c | 23 +-
drivers/net/ethernet/Kconfig | 1 +
drivers/net/ethernet/Makefile | 1 +
drivers/net/ethernet/allwinner/sun4i-emac.c | 31 +-
drivers/net/ethernet/altera/altera_sgdma.c | 8 +-
drivers/net/ethernet/altera/altera_sgdmahw.h | 1 +
drivers/net/ethernet/altera/altera_tse.h | 1 -
drivers/net/ethernet/altera/altera_tse_main.c | 1 +
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 4 +
drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +-
drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8 +-
drivers/net/ethernet/amd/xgbe/xgbe.h | 2 +-
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 24 +-
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 16 +-
drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 312 +-
drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 12 +
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 8 +-
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 2 +
drivers/net/ethernet/arc/emac_arc.c | 1 +
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 10 +
drivers/net/ethernet/broadcom/Kconfig | 10 +
drivers/net/ethernet/broadcom/bcm63xx_enet.c | 33 +-
drivers/net/ethernet/broadcom/bcmsysport.c | 19 +
drivers/net/ethernet/broadcom/bgmac.c | 30 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 64 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 100 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 71 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 12 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 10 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 10 +-
.../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 29 +-
.../net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 6 +-
.../ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 2 +
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 200 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4 +-
.../net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 254 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 10 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 595 +-
.../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 4 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 79 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 337 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 77 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 358 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 58 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 4 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 4 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 212 +-
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 37 +-
drivers/net/ethernet/broadcom/cnic.c | 36 +-
drivers/net/ethernet/broadcom/cnic_if.h | 21 +-
drivers/net/ethernet/broadcom/genet/bcmgenet.c | 112 +-
drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4 +-
drivers/net/ethernet/broadcom/genet/bcmmii.c | 117 +-
drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13 +-
drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2 +
drivers/net/ethernet/brocade/bna/bna_types.h | 1 +
drivers/net/ethernet/brocade/bna/bnad.c | 29 +-
drivers/net/ethernet/brocade/bna/bnad.h | 2 +
drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4 +
drivers/net/ethernet/cadence/macb.c | 6 +-
drivers/net/ethernet/cadence/macb.h | 2 +-
drivers/net/ethernet/cavium/Kconfig | 4 +-
drivers/net/ethernet/cavium/liquidio/lio_main.c | 3 +-
drivers/net/ethernet/cavium/thunder/nic.h | 93 +-
drivers/net/ethernet/cavium/thunder/nic_main.c | 240 +-
drivers/net/ethernet/cavium/thunder/nic_reg.h | 4 +
.../net/ethernet/cavium/thunder/nicvf_ethtool.c | 182 +-
drivers/net/ethernet/cavium/thunder/nicvf_main.c | 543 +-
drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 144 +-
drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 44 +-
drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 169 +-
drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4 +
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 +
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 42 +-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 775 +-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 14 +
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 89 +-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 10 +-
drivers/net/ethernet/chelsio/cxgb4/l2t.c | 94 +-
drivers/net/ethernet/chelsio/cxgb4/l2t.h | 18 +-
drivers/net/ethernet/chelsio/cxgb4/sge.c | 25 +-
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 346 +-
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1 -
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3 +
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 23 +
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 197 +-
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 33 +-
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 23 +-
drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 30 +-
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 3 +-
drivers/net/ethernet/cisco/enic/enic.h | 21 +-
drivers/net/ethernet/cisco/enic/enic_clsf.c | 2 +-
drivers/net/ethernet/cisco/enic/enic_ethtool.c | 113 +-
drivers/net/ethernet/cisco/enic/enic_main.c | 142 +-
drivers/net/ethernet/cisco/enic/vnic_cq.c | 3 +-
drivers/net/ethernet/cisco/enic/vnic_dev.c | 277 +-
drivers/net/ethernet/cisco/enic/vnic_dev.h | 44 +-
drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 28 +
drivers/net/ethernet/cisco/enic/vnic_intr.c | 3 +-
drivers/net/ethernet/cisco/enic/vnic_resource.h | 7 +
drivers/net/ethernet/cisco/enic/vnic_rq.c | 6 +-
drivers/net/ethernet/cisco/enic/vnic_wq.c | 33 +-
drivers/net/ethernet/cisco/enic/vnic_wq.h | 18 +
drivers/net/ethernet/davicom/dm9000.c | 2 +-
drivers/net/ethernet/ec_bhf.c | 14 +-
drivers/net/ethernet/emulex/benet/be.h | 9 +-
drivers/net/ethernet/emulex/benet/be_cmds.c | 100 +-
drivers/net/ethernet/emulex/benet/be_cmds.h | 21 +-
drivers/net/ethernet/emulex/benet/be_ethtool.c | 17 +-
drivers/net/ethernet/emulex/benet/be_main.c | 107 +-
drivers/net/ethernet/ethoc.c | 7 +-
drivers/net/ethernet/ezchip/nps_enet.c | 37 +-
drivers/net/ethernet/ezchip/nps_enet.h | 20 -
drivers/net/ethernet/freescale/fec_main.c | 53 +-
drivers/net/ethernet/freescale/fec_ptp.c | 6 -
drivers/net/ethernet/freescale/fsl_pq_mdio.c | 34 +-
drivers/net/ethernet/freescale/gianfar.c | 525 +-
drivers/net/ethernet/freescale/gianfar.h | 77 +-
drivers/net/ethernet/freescale/gianfar_ethtool.c | 8 +-
drivers/net/ethernet/freescale/gianfar_ptp.c | 1 +
drivers/net/ethernet/freescale/ucc_geth.c | 8 +-
drivers/net/ethernet/hisilicon/hip04_eth.c | 3 +-
drivers/net/ethernet/hisilicon/hip04_mdio.c | 1 -
drivers/net/ethernet/ibm/ibmveth.c | 145 +-
drivers/net/ethernet/ibm/ibmveth.h | 18 +-
drivers/net/ethernet/intel/e100.c | 12 +-
drivers/net/ethernet/intel/e1000e/ich8lan.h | 4 +-
drivers/net/ethernet/intel/e1000e/netdev.c | 58 +-
drivers/net/ethernet/intel/e1000e/regs.h | 5 +-
drivers/net/ethernet/intel/i40e/i40e.h | 74 +-
drivers/net/ethernet/intel/i40e/i40e_adminq.c | 13 +-
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 72 +-
drivers/net/ethernet/intel/i40e/i40e_common.c | 407 +-
drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4 +
drivers/net/ethernet/intel/i40e/i40e_dcb.h | 8 +-
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2 +-
drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 10 +-
drivers/net/ethernet/intel/i40e/i40e_diag.c | 11 +-
drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 157 +-
drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 12 +-
drivers/net/ethernet/intel/i40e/i40e_fcoe.h | 4 +-
drivers/net/ethernet/intel/i40e/i40e_hmc.c | 67 +-
drivers/net/ethernet/intel/i40e/i40e_hmc.h | 10 +-
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 18 +-
drivers/net/ethernet/intel/i40e/i40e_main.c | 790 +-
drivers/net/ethernet/intel/i40e/i40e_nvm.c | 135 +-
drivers/net/ethernet/intel/i40e/i40e_prototype.h | 13 +
drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7 +-
drivers/net/ethernet/intel/i40e/i40e_register.h | 1938 +++-
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 259 +-
drivers/net/ethernet/intel/i40e/i40e_txrx.h | 60 +-
drivers/net/ethernet/intel/i40e/i40e_type.h | 85 +-
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 17 +-
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 91 +-
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5 +
drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 30 +-
.../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 67 +-
drivers/net/ethernet/intel/i40evf/i40e_common.c | 380 +-
drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 10 +-
drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 13 +
drivers/net/ethernet/intel/i40evf/i40e_register.h | 3155 +----
drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 199 +-
drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 58 +-
drivers/net/ethernet/intel/i40evf/i40e_type.h | 81 +-
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 17 +-
drivers/net/ethernet/intel/i40evf/i40evf.h | 61 +-
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 44 +-
drivers/net/ethernet/intel/i40evf/i40evf_main.c | 350 +-
.../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 51 +-
drivers/net/ethernet/intel/igb/e1000_82575.c | 38 +-
drivers/net/ethernet/intel/igb/e1000_defines.h | 5 +
drivers/net/ethernet/intel/igb/e1000_phy.c | 109 +-
drivers/net/ethernet/intel/igb/e1000_phy.h | 1 +
drivers/net/ethernet/intel/igb/e1000_regs.h | 2 +
drivers/net/ethernet/intel/igb/igb_ethtool.c | 25 +-
drivers/net/ethernet/intel/igb/igb_main.c | 118 +-
drivers/net/ethernet/intel/igb/igb_ptp.c | 72 +-
drivers/net/ethernet/intel/igbvf/netdev.c | 1 +
drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7 +
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 91 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 15 +
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1 +
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 62 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 277 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 75 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 73 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 5 +
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 182 +-
drivers/net/ethernet/intel/ixgbevf/defines.h | 12 +
drivers/net/ethernet/intel/ixgbevf/ethtool.c | 51 +-
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 9 +-
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 114 +-
drivers/net/ethernet/jme.c | 8 +-
drivers/net/ethernet/marvell/mv643xx_eth.c | 57 +-
drivers/net/ethernet/marvell/mvneta.c | 12 +-
drivers/net/ethernet/mellanox/Kconfig | 1 +
drivers/net/ethernet/mellanox/Makefile | 1 +
drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 +-
drivers/net/ethernet/mellanox/mlx4/en_cq.c | 5 +-
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 51 +-
drivers/net/ethernet/mellanox/mlx4/en_main.c | 36 +-
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 46 +
drivers/net/ethernet/mellanox/mlx4/en_rx.c | 36 +-
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 15 +-
drivers/net/ethernet/mellanox/mlx4/eq.c | 6 +-
drivers/net/ethernet/mellanox/mlx4/fw.c | 82 +
drivers/net/ethernet/mellanox/mlx4/fw.h | 1 +
drivers/net/ethernet/mellanox/mlx4/intf.c | 3 +
drivers/net/ethernet/mellanox/mlx4/main.c | 45 +-
drivers/net/ethernet/mellanox/mlx4/mcg.c | 7 +-
drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 +
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 +
drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 48 +-
drivers/net/ethernet/mellanox/mlx5/core/en.h | 172 +-
.../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 259 +-
.../ethernet/mellanox/mlx5/core/en_flow_table.c | 387 +-
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1017 +-
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 46 +-
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 36 +-
drivers/net/ethernet/mellanox/mlx5/core/main.c | 32 +-
.../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7 +-
drivers/net/ethernet/mellanox/mlx5/core/port.c | 58 +-
drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 53 +
drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 8 +
drivers/net/ethernet/mellanox/mlx5/core/uar.c | 6 +
drivers/net/ethernet/mellanox/mlx5/core/wq.c | 12 +-
drivers/net/ethernet/mellanox/mlx5/core/wq.h | 3 +-
drivers/net/ethernet/mellanox/mlxsw/Kconfig | 32 +
drivers/net/ethernet/mellanox/mlxsw/Makefile | 6 +
drivers/net/ethernet/mellanox/mlxsw/cmd.h | 1090 ++
drivers/net/ethernet/mellanox/mlxsw/core.c | 1300 +++
drivers/net/ethernet/mellanox/mlxsw/core.h | 207 +
drivers/net/ethernet/mellanox/mlxsw/emad.h | 127 +
drivers/net/ethernet/mellanox/mlxsw/item.h | 407 +
drivers/net/ethernet/mellanox/mlxsw/pci.c | 1826 ++++
drivers/net/ethernet/mellanox/mlxsw/pci.h | 227 +
drivers/net/ethernet/mellanox/mlxsw/port.h | 75 +
drivers/net/ethernet/mellanox/mlxsw/reg.h | 1349 +++
drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1568 +++
drivers/net/ethernet/mellanox/mlxsw/trap.h | 66 +
drivers/net/ethernet/mellanox/mlxsw/txheader.h | 80 +
drivers/net/ethernet/micrel/ks8851.c | 1 +
drivers/net/ethernet/moxa/moxart_ether.c | 1 +
drivers/net/ethernet/neterion/s2io.c | 26 +-
drivers/net/ethernet/neterion/s2io.h | 2 -
drivers/net/ethernet/nvidia/forcedeth.c | 24 +-
drivers/net/ethernet/nxp/lpc_eth.c | 13 +-
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 20 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 33 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2 +
.../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4 +-
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 2 -
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 6 +-
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1 +
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 41 +-
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 3 +-
.../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 5 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3 +-
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 7 +-
drivers/net/ethernet/realtek/8139cp.c | 111 +-
drivers/net/ethernet/realtek/r8169.c | 175 +-
drivers/net/ethernet/renesas/ravb.h | 5 +-
drivers/net/ethernet/renesas/ravb_main.c | 118 +-
drivers/net/ethernet/renesas/sh_eth.c | 18 +-
drivers/net/ethernet/rocker/rocker.c | 212 +-
drivers/net/ethernet/rocker/rocker.h | 2 +
drivers/net/ethernet/sfc/ef10.c | 562 +-
drivers/net/ethernet/sfc/efx.c | 57 +-
drivers/net/ethernet/sfc/efx.h | 1 +
drivers/net/ethernet/sfc/falcon.c | 1 +
drivers/net/ethernet/sfc/farch.c | 4 +-
drivers/net/ethernet/sfc/mcdi.c | 28 +-
drivers/net/ethernet/sfc/mcdi.h | 3 +-
drivers/net/ethernet/sfc/mcdi_pcol.h | 3463 ++--
drivers/net/ethernet/sfc/net_driver.h | 5 +
drivers/net/ethernet/sfc/nic.h | 2 +
drivers/net/ethernet/sfc/selftest.c | 14 +-
drivers/net/ethernet/sfc/siena.c | 7 +-
drivers/net/ethernet/sfc/tx.c | 30 +-
drivers/net/ethernet/smsc/smc9194.c | 32 +-
drivers/net/ethernet/smsc/smsc911x.c | 65 +-
.../net/ethernet/stmicro/stmmac/dwmac-generic.c | 42 +-
.../net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 50 +-
.../net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 59 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 31 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 73 +-
.../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 78 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 83 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 95 +-
.../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7 +-
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11 +-
.../net/ethernet/stmicro/stmmac/stmmac_platform.c | 138 +-
.../net/ethernet/stmicro/stmmac/stmmac_platform.h | 9 +-
drivers/net/ethernet/sun/sunvnet.c | 17 +-
drivers/net/ethernet/synopsys/Kconfig | 27 +
drivers/net/ethernet/synopsys/Makefile | 5 +
drivers/net/ethernet/synopsys/dwc_eth_qos.c | 3019 ++++++
drivers/net/ethernet/ti/cpmac.c | 2 +
drivers/net/ethernet/ti/cpsw.c | 171 +-
drivers/net/ethernet/ti/davinci_emac.c | 4 +-
drivers/net/ethernet/ti/netcp_core.c | 90 +-
drivers/net/ethernet/ti/netcp_ethss.c | 456 +-
drivers/net/ethernet/tile/tilegx.c | 4 +-
drivers/net/ethernet/via/via-rhine.c | 3 +-
drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2 +
drivers/net/fddi/skfp/h/hwmtm.h | 9 -
drivers/net/fjes/Makefile | 30 +
drivers/net/fjes/fjes.h | 77 +
drivers/net/fjes/fjes_ethtool.c | 137 +
drivers/net/fjes/fjes_hw.c | 1125 ++
drivers/net/fjes/fjes_hw.h | 334 +
drivers/net/fjes/fjes_main.c | 1383 +++
drivers/net/fjes/fjes_regs.h | 142 +
drivers/net/geneve.c | 785 +-
drivers/net/hamradio/baycom_epp.c | 2 +-
drivers/net/hyperv/hyperv_net.h | 33 +
drivers/net/hyperv/netvsc.c | 43 +-
drivers/net/hyperv/netvsc_drv.c | 166 +-
drivers/net/hyperv/rndis_filter.c | 37 +-
drivers/net/ieee802154/at86rf230.c | 56 +-
drivers/net/ieee802154/cc2520.c | 2 +-
drivers/net/ieee802154/mrf24j40.c | 1 -
drivers/net/ifb.c | 207 +-
drivers/net/ipvlan/ipvlan_main.c | 3 +-
drivers/net/irda/ali-ircc.c | 6 -
drivers/net/loopback.c | 3 +-
drivers/net/macvlan.c | 1 +
drivers/net/macvtap.c | 2 +-
drivers/net/nlmon.c | 2 +-
drivers/net/ntb_netdev.c | 77 +
drivers/net/phy/Kconfig | 32 +-
drivers/net/phy/Makefile | 4 +
drivers/net/phy/aquantia.c | 201 +
drivers/net/phy/dp83640.c | 10 +-
drivers/net/phy/dp83848.c | 99 +
drivers/net/phy/dp83867.c | 6 +-
drivers/net/phy/fixed_phy.c | 112 +-
drivers/net/phy/marvell.c | 62 +-
drivers/net/phy/mdio-bcm-unimac.c | 1 +
drivers/net/phy/mdio-gpio.c | 1 +
drivers/net/phy/mdio-mux-mmioreg.c | 2 +
drivers/net/phy/mdio-mux.c | 20 +-
drivers/net/phy/mdio-octeon.c | 136 +-
drivers/net/phy/mdio_bus.c | 31 +-
drivers/net/phy/micrel.c | 23 +-
drivers/net/phy/microchip.c | 148 +
drivers/net/phy/phy.c | 7 +-
drivers/net/phy/phy_device.c | 70 +-
drivers/net/phy/realtek.c | 14 +
drivers/net/phy/smsc.c | 19 +-
drivers/net/phy/spi_ks8995.c | 22 -
drivers/net/phy/teranetics.c | 135 +
drivers/net/phy/vitesse.c | 14 -
drivers/net/ppp/ppp_generic.c | 17 +-
drivers/net/ppp/pppoe.c | 2 +-
drivers/net/rionet.c | 4 +-
drivers/net/team/team.c | 2 +-
drivers/net/tun.c | 1 +
drivers/net/usb/Kconfig | 22 +
drivers/net/usb/Makefile | 3 +-
drivers/net/usb/asix_common.c | 4 +-
drivers/net/usb/asix_devices.c | 4 +
drivers/net/usb/ch9200.c | 432 +
drivers/net/usb/lan78xx.c | 3494 ++++++
drivers/net/usb/lan78xx.h | 1069 ++
drivers/net/usb/qmi_wwan.c | 9 +-
drivers/net/usb/r8152.c | 89 +-
drivers/net/usb/usbnet.c | 39 +-
drivers/net/veth.c | 2 +
drivers/net/virtio_net.c | 30 +-
drivers/net/vrf.c | 711 ++
drivers/net/vxlan.c | 775 +-
drivers/net/wan/hdlc_fr.c | 2 +-
drivers/net/wan/sbni.c | 2 +
drivers/net/wireless/ath/ath10k/Makefile | 3 +-
drivers/net/wireless/ath/ath10k/bmi.h | 2 +-
drivers/net/wireless/ath/ath10k/ce.c | 1 +
drivers/net/wireless/ath/ath10k/ce.h | 17 +-
drivers/net/wireless/ath/ath10k/core.c | 188 +-
drivers/net/wireless/ath/ath10k/core.h | 65 +-
drivers/net/wireless/ath/ath10k/debug.c | 30 +-
drivers/net/wireless/ath/ath10k/htt.c | 66 +-
drivers/net/wireless/ath/ath10k/htt.h | 89 +-
drivers/net/wireless/ath/ath10k/htt_rx.c | 49 +-
drivers/net/wireless/ath/ath10k/htt_tx.c | 137 +-
drivers/net/wireless/ath/ath10k/hw.c | 90 +-
drivers/net/wireless/ath/ath10k/hw.h | 137 +-
drivers/net/wireless/ath/ath10k/mac.c | 300 +-
drivers/net/wireless/ath/ath10k/pci.c | 228 +-
drivers/net/wireless/ath/ath10k/pci.h | 13 +-
drivers/net/wireless/ath/ath10k/rx_desc.h | 173 +-
drivers/net/wireless/ath/ath10k/spectral.c | 18 +-
drivers/net/wireless/ath/ath10k/spectral.h | 4 +-
drivers/net/wireless/ath/ath10k/swap.c | 208 +
drivers/net/wireless/ath/ath10k/swap.h | 72 +
drivers/net/wireless/ath/ath10k/targaddrs.h | 3 +
drivers/net/wireless/ath/ath10k/txrx.c | 23 +-
drivers/net/wireless/ath/ath10k/wmi-ops.h | 32 +-
drivers/net/wireless/ath/ath10k/wmi-tlv.c | 135 +-
drivers/net/wireless/ath/ath10k/wmi.c | 1349 ++-
drivers/net/wireless/ath/ath10k/wmi.h | 1024 +-
drivers/net/wireless/ath/ath10k/wow.c | 20 +-
drivers/net/wireless/ath/ath5k/Kconfig | 1 -
drivers/net/wireless/ath/ath5k/ani.c | 4 +-
drivers/net/wireless/ath/ath5k/ath5k.h | 4 +-
drivers/net/wireless/ath/ath5k/base.c | 4 +-
drivers/net/wireless/ath/ath5k/debug.c | 2 +-
drivers/net/wireless/ath/ath6kl/htc.h | 2 +-
drivers/net/wireless/ath/ath6kl/init.c | 1 +
drivers/net/wireless/ath/ath6kl/wmi.c | 4 +-
drivers/net/wireless/ath/ath9k/ar9003_phy.h | 25 +-
drivers/net/wireless/ath/ath9k/ath9k.h | 22 +-
drivers/net/wireless/ath/ath9k/channel.c | 23 +-
drivers/net/wireless/ath/ath9k/debug.c | 2 +
drivers/net/wireless/ath/ath9k/debug.h | 2 +
drivers/net/wireless/ath/ath9k/debug_sta.c | 20 +-
drivers/net/wireless/ath/ath9k/dfs.c | 170 +-
drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2 +-
drivers/net/wireless/ath/ath9k/htc_drv_main.c | 13 +-
drivers/net/wireless/ath/ath9k/htc_hst.c | 9 +-
drivers/net/wireless/ath/ath9k/hw.c | 1 +
drivers/net/wireless/ath/ath9k/init.c | 9 +-
drivers/net/wireless/ath/ath9k/link.c | 2 +-
drivers/net/wireless/ath/ath9k/main.c | 11 +-
drivers/net/wireless/ath/ath9k/recv.c | 7 +-
drivers/net/wireless/ath/ath9k/wmi.c | 3 +-
drivers/net/wireless/ath/ath9k/xmit.c | 156 +-
drivers/net/wireless/ath/debug.c | 2 +
drivers/net/wireless/ath/dfs_pri_detector.c | 2 +-
drivers/net/wireless/ath/wil6210/Makefile | 1 +
drivers/net/wireless/ath/wil6210/boot_loader.h | 61 +
drivers/net/wireless/ath/wil6210/cfg80211.c | 244 +-
drivers/net/wireless/ath/wil6210/debugfs.c | 51 +-
drivers/net/wireless/ath/wil6210/ethtool.c | 14 +-
drivers/net/wireless/ath/wil6210/fw.c | 10 -
drivers/net/wireless/ath/wil6210/fw_inc.c | 16 +-
drivers/net/wireless/ath/wil6210/interrupt.c | 165 +-
drivers/net/wireless/ath/wil6210/ioctl.c | 4 +-
drivers/net/wireless/ath/wil6210/main.c | 198 +-
drivers/net/wireless/ath/wil6210/netdev.c | 5 +-
drivers/net/wireless/ath/wil6210/pcie_bus.c | 127 +-
drivers/net/wireless/ath/wil6210/pm.c | 98 +
drivers/net/wireless/ath/wil6210/rx_reorder.c | 6 +
drivers/net/wireless/ath/wil6210/txrx.c | 383 +-
drivers/net/wireless/ath/wil6210/txrx.h | 8 +
drivers/net/wireless/ath/wil6210/wil6210.h | 64 +-
drivers/net/wireless/ath/wil6210/wil_platform.c | 2 +-
drivers/net/wireless/ath/wil6210/wmi.c | 132 +-
drivers/net/wireless/b43/lo.c | 4 +-
drivers/net/wireless/b43/lo.h | 2 +-
drivers/net/wireless/b43/main.c | 1 +
drivers/net/wireless/b43/phy_g.c | 2 +-
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 216 +-
drivers/net/wireless/brcm80211/brcmfmac/core.h | 3 +
drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 39 +-
drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 10 +-
drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 10 +-
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 56 +
drivers/net/wireless/brcm80211/brcmfmac/sdio.c | 13 +-
.../net/wireless/brcm80211/brcmsmac/mac80211_if.c | 4 +-
drivers/net/wireless/cw1200/cw1200_spi.c | 1 -
drivers/net/wireless/hostap/hostap_main.c | 4 +-
drivers/net/wireless/ipw2x00/ipw2100.c | 2 +-
drivers/net/wireless/ipw2x00/ipw2100.h | 2 +-
drivers/net/wireless/iwlegacy/3945-mac.c | 2 +-
drivers/net/wireless/iwlegacy/debug.c | 8 +-
drivers/net/wireless/iwlwifi/dvm/agn.h | 21 +-
drivers/net/wireless/iwlwifi/dvm/debugfs.c | 8 +-
drivers/net/wireless/iwlwifi/dvm/dev.h | 7 +-
drivers/net/wireless/iwlwifi/dvm/lib.c | 10 +-
drivers/net/wireless/iwlwifi/dvm/mac80211.c | 14 +-
drivers/net/wireless/iwlwifi/dvm/main.c | 12 -
drivers/net/wireless/iwlwifi/dvm/rs.c | 51 +-
drivers/net/wireless/iwlwifi/dvm/rx.c | 109 +-
drivers/net/wireless/iwlwifi/dvm/rxon.c | 3 +-
drivers/net/wireless/iwlwifi/dvm/scan.c | 25 +-
drivers/net/wireless/iwlwifi/dvm/sta.c | 111 +-
drivers/net/wireless/iwlwifi/dvm/tx.c | 18 +-
drivers/net/wireless/iwlwifi/dvm/ucode.c | 5 +-
drivers/net/wireless/iwlwifi/iwl-7000.c | 4 +-
drivers/net/wireless/iwlwifi/iwl-8000.c | 12 +-
drivers/net/wireless/iwlwifi/iwl-config.h | 2 +
drivers/net/wireless/iwlwifi/iwl-csr.h | 3 +
drivers/net/wireless/iwlwifi/iwl-devtrace-data.h | 7 +-
.../net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h | 14 +-
drivers/net/wireless/iwlwifi/iwl-drv.c | 72 +-
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c | 4 +-
drivers/net/wireless/iwlwifi/iwl-fh.h | 6 -
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h | 17 +
drivers/net/wireless/iwlwifi/iwl-fw-file.h | 53 +-
drivers/net/wireless/iwlwifi/iwl-fw.h | 68 +
drivers/net/wireless/iwlwifi/iwl-notif-wait.c | 8 +-
drivers/net/wireless/iwlwifi/iwl-notif-wait.h | 5 +-
drivers/net/wireless/iwlwifi/iwl-op-mode.h | 32 +-
drivers/net/wireless/iwlwifi/iwl-prph.h | 12 +
drivers/net/wireless/iwlwifi/iwl-trans.h | 125 +-
drivers/net/wireless/iwlwifi/mvm/Makefile | 1 +
drivers/net/wireless/iwlwifi/mvm/coex.c | 44 +-
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 31 +-
drivers/net/wireless/iwlwifi/mvm/constants.h | 1 +
drivers/net/wireless/iwlwifi/mvm/d3.c | 101 +-
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 751 ++
drivers/net/wireless/iwlwifi/mvm/debugfs.c | 14 +-
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 7 +-
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 31 +-
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 147 -
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 4 +-
drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h | 386 +
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 12 +
drivers/net/wireless/iwlwifi/mvm/fw-api.h | 86 +-
drivers/net/wireless/iwlwifi/mvm/fw.c | 393 +-
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 13 +-
drivers/net/wireless/iwlwifi/mvm/mac80211.c | 79 +-
drivers/net/wireless/iwlwifi/mvm/mvm.h | 159 +-
drivers/net/wireless/iwlwifi/mvm/nvm.c | 23 +-
drivers/net/wireless/iwlwifi/mvm/ops.c | 115 +-
drivers/net/wireless/iwlwifi/mvm/power.c | 46 +-
drivers/net/wireless/iwlwifi/mvm/rs.c | 160 +-
drivers/net/wireless/iwlwifi/mvm/rs.h | 10 +
drivers/net/wireless/iwlwifi/mvm/rx.c | 43 +-
drivers/net/wireless/iwlwifi/mvm/scan.c | 319 +-
drivers/net/wireless/iwlwifi/mvm/sta.c | 43 +-
drivers/net/wireless/iwlwifi/mvm/sta.h | 5 +-
drivers/net/wireless/iwlwifi/mvm/tdls.c | 33 +-
drivers/net/wireless/iwlwifi/mvm/time-event.c | 14 +-
drivers/net/wireless/iwlwifi/mvm/time-event.h | 5 +-
drivers/net/wireless/iwlwifi/mvm/tof.c | 304 +
drivers/net/wireless/iwlwifi/mvm/tof.h | 94 +
drivers/net/wireless/iwlwifi/mvm/tt.c | 13 +-
drivers/net/wireless/iwlwifi/mvm/tx.c | 94 +-
drivers/net/wireless/iwlwifi/mvm/utils.c | 13 +-
drivers/net/wireless/iwlwifi/pcie/drv.c | 41 +-
drivers/net/wireless/iwlwifi/pcie/internal.h | 64 +-
drivers/net/wireless/iwlwifi/pcie/rx.c | 496 +-
drivers/net/wireless/iwlwifi/pcie/trans.c | 424 +-
drivers/net/wireless/iwlwifi/pcie/tx.c | 135 +-
drivers/net/wireless/mac80211_hwsim.c | 7 +-
drivers/net/wireless/mediatek/mt7601u/dma.c | 34 +-
drivers/net/wireless/mediatek/mt7601u/init.c | 2 +
drivers/net/wireless/mediatek/mt7601u/mac.c | 4 +
drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 10 +-
drivers/net/wireless/mediatek/mt7601u/tx.c | 3 +
drivers/net/wireless/mediatek/mt7601u/usb.c | 63 +-
drivers/net/wireless/mediatek/mt7601u/usb.h | 2 +
drivers/net/wireless/mwifiex/Kconfig | 12 +-
drivers/net/wireless/mwifiex/cfg80211.c | 130 +-
drivers/net/wireless/mwifiex/cmdevt.c | 39 +-
drivers/net/wireless/mwifiex/debugfs.c | 14 +-
drivers/net/wireless/mwifiex/decl.h | 3 +
drivers/net/wireless/mwifiex/fw.h | 95 +-
drivers/net/wireless/mwifiex/ie.c | 3 +
drivers/net/wireless/mwifiex/init.c | 10 +-
drivers/net/wireless/mwifiex/join.c | 2 +
drivers/net/wireless/mwifiex/main.c | 63 +-
drivers/net/wireless/mwifiex/main.h | 40 +
drivers/net/wireless/mwifiex/pcie.c | 12 +-
drivers/net/wireless/mwifiex/pcie.h | 45 +-
drivers/net/wireless/mwifiex/scan.c | 157 +-
drivers/net/wireless/mwifiex/sdio.c | 206 +-
drivers/net/wireless/mwifiex/sdio.h | 77 +
drivers/net/wireless/mwifiex/sta_cmd.c | 90 +-
drivers/net/wireless/mwifiex/sta_cmdresp.c | 7 +-
drivers/net/wireless/mwifiex/sta_event.c | 207 +-
drivers/net/wireless/mwifiex/sta_ioctl.c | 4 +-
drivers/net/wireless/mwifiex/tdls.c | 80 +-
drivers/net/wireless/mwifiex/txrx.c | 22 +-
drivers/net/wireless/mwifiex/uap_cmd.c | 7 +-
drivers/net/wireless/mwifiex/uap_event.c | 15 +
drivers/net/wireless/mwifiex/usb.c | 23 +-
drivers/net/wireless/mwifiex/usb.h | 3 +
drivers/net/wireless/mwifiex/util.c | 75 +-
drivers/net/wireless/mwifiex/wmm.c | 156 +-
drivers/net/wireless/mwifiex/wmm.h | 8 +
drivers/net/wireless/mwl8k.c | 49 +-
drivers/net/wireless/orinoco/main.c | 2 -
drivers/net/wireless/orinoco/orinoco_cs.c | 1 +
drivers/net/wireless/orinoco/orinoco_nortel.c | 5 +-
drivers/net/wireless/orinoco/orinoco_pci.c | 5 +-
drivers/net/wireless/orinoco/orinoco_plx.c | 5 +-
drivers/net/wireless/orinoco/orinoco_usb.c | 2 +
drivers/net/wireless/rt2x00/Kconfig | 1 -
drivers/net/wireless/rt2x00/rt2500usb.h | 2 +-
drivers/net/wireless/rt2x00/rt2800usb.c | 1 +
drivers/net/wireless/rt2x00/rt2x00.h | 6 +-
drivers/net/wireless/rt2x00/rt2x00link.c | 18 +-
drivers/net/wireless/rtlwifi/pci.h | 2 +
drivers/net/wireless/rtlwifi/rtl8188ee/fw.c | 10 +-
drivers/net/wireless/rtlwifi/rtl8188ee/fw.h | 21 +-
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 11 +-
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 12 +-
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h | 19 -
drivers/net/wireless/rtlwifi/rtl8192cu/def.h | 9 -
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 110 +-
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 105 +-
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h | 10 -
drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 2 +-
drivers/net/wireless/rtlwifi/rtl8192de/fw.h | 22 -
drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 4 +-
drivers/net/wireless/rtlwifi/rtl8192ee/fw.c | 12 +-
drivers/net/wireless/rtlwifi/rtl8192ee/fw.h | 21 +-
drivers/net/wireless/rtlwifi/rtl8192ee/phy.c | 6 +-
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 4 +-
drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 4 +-
.../net/wireless/rtlwifi/rtl8723com/fw_common.c | 10 +-
.../net/wireless/rtlwifi/rtl8723com/fw_common.h | 19 -
drivers/net/wireless/rtlwifi/rtl8821ae/fw.c | 14 +-
drivers/net/wireless/rtlwifi/rtl8821ae/fw.h | 23 +-
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 17 +
drivers/net/wireless/rtlwifi/rtl8821ae/sw.c | 5 +
drivers/net/wireless/rtlwifi/wifi.h | 28 +-
drivers/net/wireless/ti/wl12xx/scan.c | 6 +-
drivers/net/wireless/ti/wl18xx/acx.c | 27 +
drivers/net/wireless/ti/wl18xx/acx.h | 138 +-
drivers/net/wireless/ti/wl18xx/debugfs.c | 230 +-
drivers/net/wireless/ti/wl18xx/event.c | 13 +
drivers/net/wireless/ti/wl18xx/event.h | 12 +-
drivers/net/wireless/ti/wl18xx/main.c | 59 +-
drivers/net/wireless/ti/wl18xx/scan.c | 23 +-
drivers/net/wireless/ti/wl18xx/scan.h | 4 +-
drivers/net/wireless/ti/wlcore/cmd.c | 56 +-
drivers/net/wireless/ti/wlcore/cmd.h | 15 +
drivers/net/wireless/ti/wlcore/conf.h | 11 +-
drivers/net/wireless/ti/wlcore/init.c | 2 +-
drivers/net/wireless/ti/wlcore/init.h | 1 +
drivers/net/wireless/ti/wlcore/main.c | 69 +-
drivers/net/wireless/ti/wlcore/rx.c | 9 +-
drivers/net/wireless/ti/wlcore/rx.h | 3 +
drivers/net/wireless/ti/wlcore/scan.h | 6 +
drivers/net/wireless/ti/wlcore/sdio.c | 3 +-
drivers/net/wireless/ti/wlcore/wlcore.h | 3 +
drivers/net/wireless/ti/wlcore/wlcore_i.h | 5 +
drivers/net/xen-netback/common.h | 28 +-
drivers/net/xen-netback/interface.c | 10 +
drivers/net/xen-netback/netback.c | 133 +-
drivers/net/xen-netback/xenbus.c | 19 +
drivers/net/xen-netfront.c | 38 +-
drivers/nfc/Kconfig | 1 +
drivers/nfc/Makefile | 1 +
drivers/nfc/mei_phy.c | 3 +-
drivers/nfc/nxp-nci/i2c.c | 10 +-
drivers/nfc/s3fwrn5/Kconfig | 19 +
drivers/nfc/s3fwrn5/Makefile | 11 +
drivers/nfc/s3fwrn5/core.c | 219 +
drivers/nfc/s3fwrn5/firmware.c | 511 +
drivers/nfc/s3fwrn5/firmware.h | 111 +
drivers/nfc/s3fwrn5/i2c.c | 306 +
drivers/nfc/s3fwrn5/nci.c | 165 +
drivers/nfc/s3fwrn5/nci.h | 89 +
drivers/nfc/s3fwrn5/s3fwrn5.h | 99 +
drivers/nfc/st-nci/Kconfig | 11 +
drivers/nfc/st-nci/Makefile | 3 +
drivers/nfc/st-nci/ndlc.c | 1 -
drivers/nfc/st-nci/spi.c | 394 +
drivers/nfc/trf7970a.c | 6 +-
drivers/ntb/hw/intel/ntb_hw_intel.c | 39 +-
drivers/ntb/hw/intel/ntb_hw_intel.h | 3 +
drivers/ntb/ntb_transport.c | 126 +-
drivers/nvdimm/Kconfig | 23 +
drivers/nvdimm/Makefile | 5 +
drivers/nvdimm/blk.c | 5 +-
drivers/nvdimm/btt.c | 55 +-
drivers/nvdimm/btt.h | 3 +
drivers/nvdimm/btt_devs.c | 219 +-
drivers/nvdimm/claim.c | 201 +
drivers/nvdimm/dimm_devs.c | 5 +-
drivers/nvdimm/e820.c | 87 +
drivers/nvdimm/namespace_devs.c | 89 +-
drivers/nvdimm/nd-core.h | 9 +
drivers/nvdimm/nd.h | 67 +-
drivers/nvdimm/pfn.h | 35 +
drivers/nvdimm/pfn_devs.c | 337 +
drivers/nvdimm/pmem.c | 247 +-
drivers/nvdimm/region.c | 2 +
drivers/nvdimm/region_devs.c | 20 +
drivers/nvmem/Kconfig | 39 +
drivers/nvmem/Makefile | 12 +
drivers/nvmem/core.c | 1083 ++
drivers/nvmem/qfprom.c | 85 +
drivers/nvmem/sunxi_sid.c | 180 +
drivers/of/irq.c | 22 +
drivers/of/of_mdio.c | 40 +-
drivers/of/of_pci_irq.c | 22 +-
drivers/of/platform.c | 10 +
drivers/parisc/ccio-dma.c | 13 +-
drivers/parisc/iosapic.c | 2 +-
drivers/parisc/sba_iommu.c | 9 +-
drivers/pci/Makefile | 1 +
drivers/pci/access.c | 23 +-
drivers/pci/ats.c | 131 +-
drivers/pci/host/Kconfig | 8 +-
drivers/pci/host/pci-dra7xx.c | 122 +-
drivers/pci/host/pci-host-generic.c | 52 +-
drivers/pci/host/pci-imx6.c | 12 +-
drivers/pci/host/pci-keystone-dw.c | 23 +-
drivers/pci/host/pci-keystone.c | 12 +-
drivers/pci/host/pci-mvebu.c | 1 +
drivers/pci/host/pci-rcar-gen2.c | 1 +
drivers/pci/host/pci-tegra.c | 1 -
drivers/pci/host/pci-xgene-msi.c | 59 +-
drivers/pci/host/pci-xgene.c | 13 +-
drivers/pci/host/pcie-designware.c | 21 +-
drivers/pci/host/pcie-iproc.c | 60 +-
drivers/pci/host/pcie-iproc.h | 4 +-
drivers/pci/host/pcie-rcar.c | 1 -
drivers/pci/host/pcie-spear13xx.c | 3 +-
drivers/pci/host/pcie-xilinx.c | 42 +-
drivers/pci/hotplug/pci_hotplug_core.c | 122 +-
drivers/pci/hotplug/pciehp.h | 14 +-
drivers/pci/hotplug/pciehp_hpc.c | 48 +-
drivers/pci/msi.c | 132 +-
drivers/pci/of.c | 30 +
drivers/pci/pci-acpi.c | 2 +-
drivers/pci/pci-driver.c | 35 +-
drivers/pci/pci-sysfs.c | 2 +-
drivers/pci/pci.c | 28 +-
drivers/pci/pci.h | 2 +
drivers/pci/pcie/portdrv_core.c | 2 +-
drivers/pci/probe.c | 139 +-
drivers/pci/quirks.c | 183 +-
drivers/pci/slot.c | 29 +-
drivers/pci/xen-pcifront.c | 2 +-
drivers/pcmcia/pxa2xx_base.c | 17 +-
drivers/pcmcia/sa1100_generic.c | 1 -
drivers/pcmcia/sa1111_generic.c | 14 +-
drivers/pcmcia/sa11xx_base.c | 14 +-
drivers/pcmcia/soc_common.h | 1 -
drivers/perf/Kconfig | 15 +
drivers/perf/Makefile | 1 +
drivers/perf/arm_pmu.c | 927 ++
drivers/phy/Kconfig | 13 +
drivers/phy/Makefile | 1 +
drivers/phy/phy-armada375-usb2.c | 3 +-
drivers/phy/phy-bcm-kona-usb2.c | 2 +-
drivers/phy/phy-berlin-sata.c | 3 +-
drivers/phy/phy-berlin-usb.c | 13 +-
drivers/phy/phy-brcmstb-sata.c | 2 +-
drivers/phy/phy-dm816x-usb.c | 2 +-
drivers/phy/phy-exynos-dp-video.c | 2 +-
drivers/phy/phy-exynos-mipi-video.c | 2 +-
drivers/phy/phy-exynos5-usbdrd.c | 2 +-
drivers/phy/phy-exynos5250-sata.c | 2 +-
drivers/phy/phy-hix5hd2-sata.c | 2 +-
drivers/phy/phy-lpc18xx-usb-otg.c | 143 +
drivers/phy/phy-miphy28lp.c | 3 +-
drivers/phy/phy-miphy365x.c | 2 +-
drivers/phy/phy-mvebu-sata.c | 2 +-
drivers/phy/phy-omap-usb2.c | 2 +-
drivers/phy/phy-qcom-apq8064-sata.c | 2 +-
drivers/phy/phy-qcom-ipq806x-sata.c | 2 +-
drivers/phy/phy-qcom-ufs-i.h | 2 +-
drivers/phy/phy-qcom-ufs-qmp-14nm.c | 3 +-
drivers/phy/phy-qcom-ufs-qmp-20nm.c | 3 +-
drivers/phy/phy-qcom-ufs.c | 13 +-
drivers/phy/phy-rcar-gen2.c | 2 +-
drivers/phy/phy-rockchip-usb.c | 9 +-
drivers/phy/phy-samsung-usb2.c | 2 +-
drivers/phy/phy-spear1310-miphy.c | 2 +-
drivers/phy/phy-spear1340-miphy.c | 2 +-
drivers/phy/phy-stih41x-usb.c | 2 +-
drivers/phy/phy-sun4i-usb.c | 423 +-
drivers/phy/phy-sun9i-usb.c | 2 +-
drivers/phy/phy-ti-pipe3.c | 2 +-
drivers/phy/phy-tusb1210.c | 32 +-
drivers/phy/ulpi_phy.h | 2 +-
drivers/pinctrl/Kconfig | 7 +
drivers/pinctrl/Makefile | 8 +-
drivers/pinctrl/bcm/pinctrl-bcm2835.c | 5 +-
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c | 2 +-
drivers/pinctrl/core.c | 6 +-
drivers/pinctrl/devicetree.c | 8 +-
drivers/pinctrl/freescale/Kconfig | 7 +
drivers/pinctrl/freescale/Makefile | 1 +
drivers/pinctrl/freescale/pinctrl-imx6ul.c | 322 +
drivers/pinctrl/intel/pinctrl-baytrail.c | 70 +-
drivers/pinctrl/intel/pinctrl-cherryview.c | 78 +-
drivers/pinctrl/intel/pinctrl-intel.c | 8 +-
drivers/pinctrl/mediatek/pinctrl-mt8173.c | 1 +
drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 101 +-
drivers/pinctrl/mediatek/pinctrl-mtk-common.h | 4 +
drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c | 21 -
drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c | 24 -
drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c | 30 +-
drivers/pinctrl/nomadik/pinctrl-nomadik.c | 243 +-
drivers/pinctrl/nomadik/pinctrl-nomadik.h | 4 -
drivers/pinctrl/pinconf.c | 88 +-
drivers/pinctrl/pinctrl-adi2-bf60x.c | 8 +-
drivers/pinctrl/pinctrl-adi2.c | 7 +-
drivers/pinctrl/pinctrl-amd.c | 17 +-
drivers/pinctrl/pinctrl-at91.c | 38 +-
drivers/pinctrl/pinctrl-coh901.c | 7 +-
drivers/pinctrl/pinctrl-digicolor.c | 378 +
drivers/pinctrl/pinctrl-lpc18xx.c | 54 +-
drivers/pinctrl/pinctrl-pistachio.c | 10 +-
drivers/pinctrl/pinctrl-rockchip.c | 71 +-
drivers/pinctrl/pinctrl-single.c | 16 +-
drivers/pinctrl/pinctrl-st.c | 10 +-
drivers/pinctrl/pinctrl-tegra.c | 19 +-
drivers/pinctrl/pinctrl-zynq.c | 4 +-
drivers/pinctrl/pinmux.c | 6 +-
drivers/pinctrl/qcom/Kconfig | 20 +
drivers/pinctrl/qcom/Makefile | 3 +
drivers/pinctrl/qcom/pinctrl-msm.c | 20 +-
drivers/pinctrl/qcom/pinctrl-qdf2xxx.c | 122 +
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c | 376 +-
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 791 ++
drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 882 ++
drivers/pinctrl/samsung/pinctrl-exynos.c | 22 +-
drivers/pinctrl/samsung/pinctrl-exynos5440.c | 90 +-
drivers/pinctrl/samsung/pinctrl-s3c24xx.c | 36 +-
drivers/pinctrl/samsung/pinctrl-s3c64xx.c | 44 +-
drivers/pinctrl/sh-pfc/core.c | 52 +-
drivers/pinctrl/sh-pfc/pfc-r8a7740.c | 4 -
drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 21 +-
drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 19 +-
drivers/pinctrl/sh-pfc/pfc-r8a7794.c | 30 +
drivers/pinctrl/sh-pfc/pfc-sh73a0.c | 4 -
drivers/pinctrl/sh-pfc/pinctrl.c | 86 +-
drivers/pinctrl/sh-pfc/sh_pfc.h | 5 +
drivers/pinctrl/sirf/pinctrl-atlas7.c | 527 +-
drivers/pinctrl/sirf/pinctrl-sirf.c | 7 +-
drivers/pinctrl/spear/pinctrl-plgpio.c | 2 +-
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 17 +-
drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c | 2 +-
drivers/pinctrl/sunxi/pinctrl-sunxi.c | 60 +-
drivers/pinctrl/uniphier/Kconfig | 32 +
drivers/pinctrl/uniphier/Makefile | 8 +
drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c | 886 ++
drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c | 1274 +++
drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c | 1554 +++
drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c | 1351 +++
drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c | 794 ++
drivers/pinctrl/uniphier/pinctrl-proxstream2.c | 1269 +++
drivers/pinctrl/uniphier/pinctrl-uniphier-core.c | 688 ++
drivers/pinctrl/uniphier/pinctrl-uniphier.h | 217 +
drivers/platform/chrome/chromeos_laptop.c | 4 +-
drivers/platform/x86/Kconfig | 5 +
drivers/platform/x86/Makefile | 1 +
drivers/platform/x86/acer-wmi.c | 1 +
drivers/platform/x86/acerhdf.c | 9 +-
drivers/platform/x86/asus-laptop.c | 1 +
drivers/platform/x86/asus-nb-wmi.c | 18 +
drivers/platform/x86/hp-wireless.c | 7 +-
drivers/platform/x86/ideapad-laptop.c | 7 +
drivers/platform/x86/intel_mid_thermal.c | 9 +-
drivers/platform/x86/intel_pmc_ipc.c | 9 +-
drivers/platform/x86/surfacepro3_button.c | 216 +
drivers/platform/x86/thinkpad_acpi.c | 2 +-
drivers/platform/x86/toshiba_acpi.c | 805 +-
drivers/platform/x86/wmi.c | 51 +-
drivers/pnp/manager.c | 2 -
drivers/power/Kconfig | 17 +-
drivers/power/avs/rockchip-io-domain.c | 59 +
drivers/power/bq2415x_charger.c | 143 +-
drivers/power/bq24190_charger.c | 4 +-
drivers/power/bq24735-charger.c | 52 +-
drivers/power/bq27x00_battery.c | 123 +-
drivers/power/charger-manager.c | 2 +-
drivers/power/ds2780_battery.c | 20 +-
drivers/power/ds2781_battery.c | 8 -
drivers/power/ltc2941-battery-gauge.c | 54 +-
drivers/power/max77693_charger.c | 1 +
drivers/power/olpc_battery.c | 7 +-
drivers/power/pm2301_charger.c | 1 -
drivers/power/power_supply_core.c | 2 +-
drivers/power/reset/Kconfig | 7 +
drivers/power/reset/Makefile | 1 +
drivers/power/reset/at91-reset.c | 26 +-
drivers/power/reset/zx-reboot.c | 80 +
drivers/power/rt5033_battery.c | 2 +-
drivers/power/rt9455_charger.c | 16 +-
drivers/power/rx51_battery.c | 2 +-
drivers/power/twl4030_charger.c | 592 +-
drivers/powercap/intel_rapl.c | 8 +-
drivers/pwm/Kconfig | 19 +
drivers/pwm/Makefile | 2 +
drivers/pwm/core.c | 49 +-
drivers/pwm/pwm-atmel-hlcdc.c | 5 +
drivers/pwm/pwm-atmel-tcb.c | 2 +-
drivers/pwm/pwm-atmel.c | 6 +-
drivers/pwm/pwm-bcm-kona.c | 54 +-
drivers/pwm/pwm-crc.c | 143 +
drivers/pwm/pwm-ep93xx.c | 4 +-
drivers/pwm/pwm-imx.c | 5 +-
drivers/pwm/pwm-lpc18xx-sct.c | 465 +
drivers/pwm/pwm-mxs.c | 4 +-
drivers/pwm/pwm-pca9685.c | 90 +-
drivers/pwm/pwm-renesas-tpu.c | 2 +-
drivers/pwm/pwm-rockchip.c | 2 +-
drivers/pwm/pwm-tegra.c | 6 +-
drivers/pwm/pwm-tiecap.c | 10 +-
drivers/pwm/pwm-tiehrpwm.c | 6 +-
drivers/pwm/sysfs.c | 29 +-
drivers/ras/Kconfig | 37 +-
drivers/regulator/88pm800.c | 236 +-
drivers/regulator/Kconfig | 43 +-
drivers/regulator/Makefile | 3 +-
drivers/regulator/act8865-regulator.c | 1 -
drivers/regulator/ad5398.c | 1 -
drivers/regulator/anatop-regulator.c | 1 +
drivers/regulator/axp20x-regulator.c | 1 +
drivers/regulator/core.c | 154 +-
drivers/regulator/da9062-regulator.c | 1 -
drivers/regulator/da9210-regulator.c | 76 +-
drivers/regulator/da9211-regulator.c | 41 +-
drivers/regulator/da9211-regulator.h | 18 +-
drivers/regulator/fan53555.c | 1 +
drivers/regulator/gpio-regulator.c | 1 +
drivers/regulator/isl6271a-regulator.c | 1 -
drivers/regulator/isl9305.c | 2 +-
drivers/regulator/lp3971.c | 1 -
drivers/regulator/lp3972.c | 1 -
drivers/regulator/lp872x.c | 17 +-
drivers/regulator/ltc3589.c | 4 +-
drivers/regulator/max1586.c | 1 -
drivers/regulator/max77693.c | 173 +-
drivers/regulator/max77843.c | 201 -
drivers/regulator/max8660.c | 1 -
drivers/regulator/max8973-regulator.c | 81 +-
drivers/regulator/mt6311-regulator.c | 179 +
drivers/regulator/mt6311-regulator.h | 65 +
drivers/regulator/of_regulator.c | 3 +
drivers/regulator/pbias-regulator.c | 56 +-
drivers/regulator/pfuze100-regulator.c | 2 -
drivers/regulator/pwm-regulator.c | 160 +-
drivers/regulator/qcom_smd-regulator.c | 350 +
drivers/regulator/qcom_spmi-regulator.c | 203 +-
drivers/regulator/rk808-regulator.c | 212 +-
drivers/regulator/tps51632-regulator.c | 1 -
drivers/regulator/tps62360-regulator.c | 1 -
drivers/regulator/tps65023-regulator.c | 1 -
drivers/regulator/tps65218-regulator.c | 2 +-
drivers/regulator/tps6586x-regulator.c | 4 +-
drivers/regulator/vexpress.c | 1 +
drivers/reset/Makefile | 3 +
drivers/reset/reset-ath79.c | 129 +
drivers/reset/reset-lpc18xx.c | 258 +
drivers/reset/reset-socfpga.c | 19 +-
drivers/reset/reset-zynq.c | 155 +
drivers/reset/sti/reset-stih407.c | 4 +-
drivers/reset/sti/reset-stih415.c | 4 +-
drivers/reset/sti/reset-stih416.c | 4 +-
drivers/rtc/Kconfig | 38 +-
drivers/rtc/Makefile | 2 +
drivers/rtc/class.c | 33 +-
drivers/rtc/interface.c | 2 +-
drivers/rtc/rtc-88pm80x.c | 28 +-
drivers/rtc/rtc-ab-b5ze-s3.c | 2 +-
drivers/rtc/rtc-ab8500.c | 2 +
drivers/rtc/rtc-armada38x.c | 33 +-
drivers/rtc/rtc-as3722.c | 4 +-
drivers/rtc/rtc-at91rm9200.c | 43 +-
drivers/rtc/rtc-at91sam9.c | 45 +-
drivers/rtc/rtc-bfin.c | 2 +-
drivers/rtc/rtc-bq32k.c | 3 +-
drivers/rtc/rtc-cmos.c | 125 +-
drivers/rtc/rtc-coh901331.c | 1 +
drivers/rtc/rtc-core.h | 19 +-
drivers/rtc/rtc-da9063.c | 392 +-
drivers/rtc/rtc-dev.c | 1 +
drivers/rtc/rtc-ds1305.c | 18 -
drivers/rtc/rtc-ds1307.c | 120 +-
drivers/rtc/rtc-ds1343.c | 12 -
drivers/rtc/rtc-ds1374.c | 12 +-
drivers/rtc/rtc-ds1511.c | 42 +-
drivers/rtc/rtc-ds1553.c | 4 +-
drivers/rtc/rtc-ds1685.c | 22 +-
drivers/rtc/rtc-ds1742.c | 4 +-
drivers/rtc/rtc-ds3232.c | 8 +-
drivers/rtc/rtc-fm3130.c | 1 -
drivers/rtc/rtc-gemini.c | 5 +-
drivers/rtc/rtc-hym8563.c | 1 -
drivers/rtc/rtc-isl12022.c | 8 +-
drivers/rtc/rtc-isl12057.c | 2 +-
drivers/rtc/rtc-lpc24xx.c | 310 +
drivers/rtc/rtc-m48t59.c | 18 +-
drivers/rtc/rtc-max8997.c | 1 +
drivers/rtc/rtc-moxart.c | 1 +
drivers/rtc/rtc-mpc5121.c | 1 +
drivers/rtc/rtc-mt6397.c | 27 +
drivers/rtc/rtc-mv.c | 1 +
drivers/rtc/rtc-mxc.c | 60 +-
drivers/rtc/rtc-omap.c | 33 +
drivers/rtc/rtc-opal.c | 10 +-
drivers/rtc/rtc-pcf2123.c | 8 +-
drivers/rtc/rtc-pcf2127.c | 35 +-
drivers/rtc/rtc-pcf85063.c | 1 -
drivers/rtc/rtc-pcf8523.c | 1 -
drivers/rtc/rtc-pcf8563.c | 1 -
drivers/rtc/rtc-pcf8583.c | 1 -
drivers/rtc/rtc-pl031.c | 2 +-
drivers/rtc/rtc-pxa.c | 55 +-
drivers/rtc/rtc-rp5c01.c | 4 +-
drivers/rtc/rtc-rx8025.c | 277 +-
drivers/rtc/rtc-rx8581.c | 1 -
drivers/rtc/rtc-s3c.c | 4 +-
drivers/rtc/rtc-s5m.c | 1 +
drivers/rtc/rtc-sa1100.c | 139 +-
drivers/rtc/rtc-sa1100.h | 23 +
drivers/rtc/rtc-sirfsoc.c | 107 +-
drivers/rtc/rtc-snvs.c | 132 +-
drivers/rtc/rtc-st-lpc.c | 2 +-
drivers/rtc/rtc-stk17ta8.c | 4 +-
drivers/rtc/rtc-sysfs.c | 72 +-
drivers/rtc/rtc-tx4939.c | 6 +-
drivers/rtc/rtc-vt8500.c | 1 +
drivers/rtc/rtc-zynqmp.c | 279 +
drivers/s390/block/dasd_alias.c | 6 +-
drivers/s390/block/dasd_eckd.c | 333 +-
drivers/s390/block/dasd_eckd.h | 11 +-
drivers/s390/block/dasd_int.h | 1 +
drivers/s390/block/dcssblk.c | 28 +-
drivers/s390/block/xpram.c | 5 +-
drivers/s390/char/con3270.c | 4 +
drivers/s390/char/ctrlchar.c | 16 +-
drivers/s390/char/ctrlchar.h | 12 +
drivers/s390/char/diag_ftp.c | 4 +-
drivers/s390/char/monreader.c | 2 +-
drivers/s390/char/sclp.c | 6 +-
drivers/s390/char/sclp_cmd.c | 18 +-
drivers/s390/char/sclp_vt220.c | 52 +-
drivers/s390/char/tty3270.c | 4 +
drivers/s390/cio/chsc.c | 165 +-
drivers/s390/cio/device_ops.c | 2 +-
drivers/s390/cio/eadm_sch.c | 3 +-
drivers/s390/crypto/ap_bus.c | 2 +-
drivers/s390/crypto/zcrypt_api.c | 10 +-
drivers/s390/crypto/zcrypt_pcixcc.c | 2 +-
drivers/s390/net/qeth_l2_main.c | 5 +-
drivers/s390/net/qeth_l3_main.c | 5 +-
drivers/s390/scsi/zfcp_aux.c | 2 +-
drivers/s390/scsi/zfcp_erp.c | 62 +-
drivers/s390/scsi/zfcp_fc.c | 8 +-
drivers/s390/scsi/zfcp_fsf.c | 28 +-
drivers/s390/scsi/zfcp_qdio.c | 14 +-
drivers/s390/virtio/virtio_ccw.c | 10 +-
drivers/scsi/53c700.c | 2 +-
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 2 +
drivers/scsi/a100u2w.c | 2 +-
drivers/scsi/aic7xxx/aic7xxx_core.c | 2 +-
drivers/scsi/aic94xx/aic94xx_init.c | 8 +-
drivers/scsi/aic94xx/aic94xx_sds.c | 5 +-
drivers/scsi/arcmsr/arcmsr_hba.c | 7 +-
drivers/scsi/bfa/bfa_ioc.c | 24 +-
drivers/scsi/bfa/bfad_im.c | 2 +
drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 66 +
drivers/scsi/cxlflash/Kconfig | 11 +
drivers/scsi/cxlflash/Makefile | 2 +
drivers/scsi/cxlflash/common.h | 208 +
drivers/scsi/cxlflash/lunmgt.c | 266 +
drivers/scsi/cxlflash/main.c | 2494 +++++
drivers/scsi/cxlflash/main.h | 108 +
drivers/scsi/cxlflash/sislite.h | 472 +
drivers/scsi/cxlflash/superpipe.c | 2084 ++++
drivers/scsi/cxlflash/superpipe.h | 147 +
drivers/scsi/cxlflash/vlun.c | 1243 +++
drivers/scsi/cxlflash/vlun.h | 86 +
drivers/scsi/device_handler/Kconfig | 2 +-
drivers/scsi/device_handler/Makefile | 1 -
drivers/scsi/device_handler/scsi_dh.c | 621 --
drivers/scsi/device_handler/scsi_dh_alua.c | 31 +-
drivers/scsi/device_handler/scsi_dh_emc.c | 58 +-
drivers/scsi/device_handler/scsi_dh_hp_sw.c | 55 +-
drivers/scsi/device_handler/scsi_dh_rdac.c | 80 +-
drivers/scsi/dpt_i2o.c | 3 +
drivers/scsi/fcoe/fcoe.c | 2 +-
drivers/scsi/fcoe/fcoe_transport.c | 8 +-
drivers/scsi/hpsa.c | 286 +-
drivers/scsi/hpsa.h | 16 +-
drivers/scsi/hpsa_cmd.h | 10 +-
drivers/scsi/hptiop.c | 97 +-
drivers/scsi/hptiop.h | 6 +-
drivers/scsi/ipr.c | 21 +-
drivers/scsi/ipr.h | 17 +-
drivers/scsi/libfc/fc_fcp.c | 2 +-
drivers/scsi/libiscsi.c | 26 +-
drivers/scsi/lpfc/lpfc_hbadisc.c | 2 +-
drivers/scsi/lpfc/lpfc_mbox.c | 7 +-
drivers/scsi/megaraid.c | 140 +-
drivers/scsi/megaraid/megaraid_sas_base.c | 544 +-
drivers/scsi/megaraid/megaraid_sas_fusion.c | 95 +-
drivers/scsi/mpt2sas/mpt2sas_base.c | 22 +-
drivers/scsi/mpt2sas/mpt2sas_base.h | 41 +-
drivers/scsi/mpt2sas/mpt2sas_ctl.c | 38 +-
drivers/scsi/mpt2sas/mpt2sas_scsih.c | 605 +-
drivers/scsi/mpt2sas/mpt2sas_transport.c | 12 +-
drivers/scsi/mpt3sas/mpi/mpi2.h | 8 +-
drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 52 +-
drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 4 +-
drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 4 +-
drivers/scsi/mpt3sas/mpt3sas_base.c | 326 +-
drivers/scsi/mpt3sas/mpt3sas_base.h | 57 +-
drivers/scsi/mpt3sas/mpt3sas_scsih.c | 343 +-
drivers/scsi/mpt3sas/mpt3sas_transport.c | 22 +-
drivers/scsi/mvsas/mv_init.c | 20 +-
drivers/scsi/mvsas/mv_sas.c | 2 +
drivers/scsi/pm8001/pm8001_defs.h | 4 +-
drivers/scsi/pm8001/pm8001_hwi.c | 5 +
drivers/scsi/pm8001/pm8001_init.c | 5 +-
drivers/scsi/pm8001/pm8001_sas.c | 19 +-
drivers/scsi/pm8001/pm8001_sas.h | 12 +-
drivers/scsi/pm8001/pm80xx_hwi.c | 112 +-
drivers/scsi/pm8001/pm80xx_hwi.h | 5 +-
drivers/scsi/qla2xxx/Kconfig | 6 +-
drivers/scsi/qla2xxx/qla_attr.c | 24 +-
drivers/scsi/qla2xxx/qla_bsg.c | 7 +-
drivers/scsi/qla2xxx/qla_dbg.c | 102 +-
drivers/scsi/qla2xxx/qla_def.h | 15 +-
drivers/scsi/qla2xxx/qla_gs.c | 52 +-
drivers/scsi/qla2xxx/qla_init.c | 162 +-
drivers/scsi/qla2xxx/qla_iocb.c | 132 +-
drivers/scsi/qla2xxx/qla_isr.c | 70 +-
drivers/scsi/qla2xxx/qla_mbx.c | 80 +-
drivers/scsi/qla2xxx/qla_mid.c | 3 +-
drivers/scsi/qla2xxx/qla_mr.c | 22 +-
drivers/scsi/qla2xxx/qla_nx.c | 165 +-
drivers/scsi/qla2xxx/qla_nx2.c | 20 +-
drivers/scsi/qla2xxx/qla_nx2.h | 6 +-
drivers/scsi/qla2xxx/qla_os.c | 41 +-
drivers/scsi/qla2xxx/qla_sup.c | 14 +-
drivers/scsi/qla2xxx/qla_target.c | 139 +-
drivers/scsi/qla2xxx/qla_tmpl.c | 27 +-
drivers/scsi/qla2xxx/qla_version.h | 2 +-
drivers/scsi/qla2xxx/tcm_qla2xxx.c | 19 +-
drivers/scsi/scsi_common.c | 109 +
drivers/scsi/scsi_debug.c | 158 +-
drivers/scsi/scsi_dh.c | 441 +
drivers/scsi/scsi_error.c | 83 +-
drivers/scsi/scsi_lib.c | 19 +-
drivers/scsi/scsi_priv.h | 11 +
drivers/scsi/scsi_sysfs.c | 12 +
drivers/scsi/scsi_transport_iscsi.c | 13 +-
drivers/scsi/scsi_transport_sas.c | 10 -
drivers/scsi/scsi_transport_spi.c | 4 +-
drivers/scsi/sd.c | 4 +-
drivers/scsi/st.c | 83 +-
drivers/scsi/storvsc_drv.c | 224 +-
drivers/scsi/sun3x_esp.c | 2 +-
drivers/scsi/wd719x.c | 2 +-
drivers/scsi/xen-scsifront.c | 10 +-
drivers/sh/intc/chip.c | 6 +-
drivers/sh/intc/core.c | 4 +-
drivers/sh/intc/internals.h | 10 +-
drivers/sh/intc/virq.c | 29 +-
drivers/sh/pm_runtime.c | 19 +-
drivers/soc/Makefile | 1 +
drivers/soc/dove/Makefile | 1 +
drivers/soc/dove/pmu.c | 412 +
drivers/soc/mediatek/Kconfig | 19 +
drivers/soc/mediatek/Makefile | 2 +
drivers/soc/mediatek/mtk-infracfg.c | 91 +
drivers/soc/mediatek/mtk-pmic-wrap.c | 1 -
drivers/soc/mediatek/mtk-scpsys.c | 488 +
drivers/soc/qcom/Kconfig | 31 +
drivers/soc/qcom/Makefile | 3 +
drivers/soc/qcom/smd-rpm.c | 244 +
drivers/soc/qcom/smd.c | 1327 +++
drivers/soc/qcom/smem.c | 769 ++
drivers/soc/tegra/Makefile | 6 +-
drivers/soc/tegra/common.c | 2 +
drivers/soc/tegra/fuse/Makefile | 2 +
drivers/soc/tegra/fuse/fuse-tegra.c | 257 +-
drivers/soc/tegra/fuse/fuse-tegra20.c | 175 +-
drivers/soc/tegra/fuse/fuse-tegra30.c | 232 +-
drivers/soc/tegra/fuse/fuse.h | 95 +-
drivers/soc/tegra/fuse/speedo-tegra114.c | 22 +-
drivers/soc/tegra/fuse/speedo-tegra124.c | 26 +-
drivers/soc/tegra/fuse/speedo-tegra20.c | 28 +-
drivers/soc/tegra/fuse/speedo-tegra210.c | 184 +
drivers/soc/tegra/fuse/speedo-tegra30.c | 48 +-
drivers/soc/tegra/fuse/tegra-apbmisc.c | 76 +-
drivers/soc/tegra/pmc.c | 121 +-
drivers/spi/Kconfig | 20 +
drivers/spi/Makefile | 2 +
drivers/spi/spi-atmel.c | 3 +-
drivers/spi/spi-bcm2835.c | 10 +-
drivers/spi/spi-bcm63xx-hsspi.c | 13 +-
drivers/spi/spi-davinci.c | 57 +-
drivers/spi/spi-fsl-espi.c | 89 +-
drivers/spi/spi-fsl-lib.c | 19 -
drivers/spi/spi-fsl-lib.h | 3 -
drivers/spi/spi-fsl-spi.c | 43 +-
drivers/spi/spi-img-spfi.c | 14 +
drivers/spi/spi-meson-spifc.c | 1 +
drivers/spi/spi-mpc512x-psc.c | 70 +-
drivers/spi/spi-mt65xx.c | 719 ++
drivers/spi/spi-pxa2xx-pci.c | 1 -
drivers/spi/spi-pxa2xx.c | 66 +-
drivers/spi/spi-pxa2xx.h | 5 -
drivers/spi/spi-rockchip.c | 1 -
drivers/spi/spi-rspi.c | 19 +-
drivers/spi/spi-s3c24xx.c | 1 -
drivers/spi/spi-s3c64xx.c | 4 +-
drivers/spi/spi-sh-msiof.c | 5 -
drivers/spi/spi-ti-qspi.c | 34 +-
drivers/spi/spi-xcomm.c | 2 +-
drivers/spi/spi-xlp.c | 456 +
drivers/spi/spi.c | 235 +-
drivers/spi/spidev.c | 8 +-
drivers/spmi/Kconfig | 2 +-
drivers/spmi/spmi-pmic-arb.c | 32 +-
drivers/spmi/spmi.c | 22 +-
drivers/staging/Kconfig | 8 +-
drivers/staging/Makefile | 4 +-
drivers/staging/android/Kconfig | 3 +-
drivers/staging/android/TODO | 30 +-
drivers/staging/android/ashmem.c | 11 +-
drivers/staging/android/ion/ion.c | 26 +-
drivers/staging/android/ion/ion_chunk_heap.c | 8 +-
drivers/staging/android/ion/ion_cma_heap.c | 4 +-
drivers/staging/android/ion/ion_page_pool.c | 5 +-
drivers/staging/android/ion/ion_system_heap.c | 16 +-
drivers/staging/android/ion/ion_test.c | 3 +-
drivers/staging/android/sync.h | 10 +-
drivers/staging/android/timed_gpio.c | 4 +-
drivers/staging/board/armadillo800eva.c | 2 +-
drivers/staging/board/board.c | 36 +-
.../clocking-wizard/clk-xlnx-clock-wizard.c | 1 +
drivers/staging/comedi/Kconfig | 2 +-
drivers/staging/comedi/comedi_compat32.c | 3 +-
drivers/staging/comedi/comedi_fops.c | 37 +-
drivers/staging/comedi/drivers.c | 2 +-
.../comedi/drivers/addi-data/hwdrv_apci1564.c | 93 +-
.../comedi/drivers/addi-data/hwdrv_apci3501.c | 154 +-
drivers/staging/comedi/drivers/addi_apci_1564.c | 35 +-
drivers/staging/comedi/drivers/addi_apci_3501.c | 60 +-
drivers/staging/comedi/drivers/addi_tcw.h | 63 +-
drivers/staging/comedi/drivers/cb_pcimdas.c | 6 +-
drivers/staging/comedi/drivers/dac02.c | 6 +-
drivers/staging/comedi/drivers/das08_cs.c | 4 +-
drivers/staging/comedi/drivers/das16.c | 3 +-
drivers/staging/comedi/drivers/das16m1.c | 41 +-
drivers/staging/comedi/drivers/dmm32at.c | 6 +-
drivers/staging/comedi/drivers/fl512.c | 6 +-
drivers/staging/comedi/drivers/ii_pci20kc.c | 1 +
drivers/staging/comedi/drivers/me4000.c | 1000 +-
drivers/staging/comedi/drivers/ni_daq_dio24.c | 6 +-
drivers/staging/comedi/drivers/ni_usb6501.c | 27 +-
drivers/staging/comedi/drivers/pcl816.c | 2 +-
drivers/staging/comedi/drivers/s626.c | 6 +-
drivers/staging/comedi/drivers/serial2002.c | 10 +-
drivers/staging/comedi/drivers/usbduxsigma.c | 194 +-
drivers/staging/comedi/range.c | 13 +-
drivers/staging/dgap/dgap.c | 45 +-
drivers/staging/dgnc/dgnc_driver.h | 6 +-
drivers/staging/dgnc/dgnc_sysfs.h | 16 +-
drivers/staging/emxx_udc/emxx_udc.c | 76 +-
drivers/staging/fbtft/Kconfig | 9 +-
drivers/staging/fbtft/Makefile | 1 +
drivers/staging/fbtft/fb_uc1611.c | 350 +
drivers/staging/fbtft/fb_watterott.c | 4 +-
drivers/staging/fbtft/fbtft-core.c | 27 +-
drivers/staging/fbtft/fbtft.h | 57 +-
drivers/staging/fbtft/fbtft_device.c | 31 +
drivers/staging/fbtft/flexfb.c | 269 +-
drivers/staging/fsl-mc/README.txt | 364 +
drivers/staging/fsl-mc/TODO | 28 +-
drivers/staging/ft1000/ft1000-pcmcia/ft1000.h | 19 +-
drivers/staging/ft1000/ft1000-usb/ft1000_debug.c | 31 +-
.../staging/ft1000/ft1000-usb/ft1000_download.c | 24 +-
drivers/staging/ft1000/ft1000-usb/ft1000_hw.c | 1 -
drivers/staging/ft1000/ft1000-usb/ft1000_usb.h | 4 +-
drivers/staging/gdm72xx/usb_ids.h | 6 +-
drivers/staging/iio/accel/sca3000_ring.c | 2 +-
drivers/staging/iio/adc/mxs-lradc.c | 131 +-
drivers/staging/iio/addac/adt7316-i2c.c | 1 -
drivers/staging/iio/iio_dummy_evgen.c | 1 +
drivers/staging/iio/iio_simple_dummy.c | 2 -
drivers/staging/iio/iio_simple_dummy.h | 1 +
drivers/staging/iio/iio_simple_dummy_buffer.c | 2 +-
drivers/staging/iio/iio_simple_dummy_events.c | 4 +-
drivers/staging/iio/light/isl29018.c | 1 -
drivers/staging/iio/light/isl29028.c | 1 -
drivers/staging/iio/meter/ade7854.h | 4 +-
drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 7 +-
.../staging/iio/trigger/iio-trig-periodic-rtc.c | 5 +-
drivers/staging/lustre/README.txt | 16 +-
.../staging/lustre/include/linux/libcfs/libcfs.h | 15 +-
.../lustre/include/linux/libcfs/libcfs_debug.h | 1 -
.../lustre/include/linux/libcfs/libcfs_fail.h | 18 +-
.../lustre/include/linux/libcfs/libcfs_private.h | 28 +-
.../lustre/include/linux/libcfs/libcfs_string.h | 2 -
.../staging/lustre/include/linux/lnet/lib-lnet.h | 2 +-
.../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 265 +-
.../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 146 +-
.../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 58 +-
.../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 6 -
.../staging/lustre/lnet/klnds/socklnd/socklnd.c | 5 -
.../staging/lustre/lnet/klnds/socklnd/socklnd.h | 142 +-
.../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 3 +-
drivers/staging/lustre/lnet/lnet/router_proc.c | 11 -
drivers/staging/lustre/lnet/selftest/console.h | 90 +-
drivers/staging/lustre/lnet/selftest/framework.c | 12 +-
drivers/staging/lustre/lustre/fid/fid_request.c | 4 +-
drivers/staging/lustre/lustre/fld/fld_cache.c | 2 +-
drivers/staging/lustre/lustre/fld/fld_request.c | 2 +-
.../lustre/lustre/include/linux/lustre_compat25.h | 119 -
.../lustre/include/linux/lustre_patchless_compat.h | 12 -
.../staging/lustre/lustre/include/lprocfs_status.h | 313 +-
.../lustre/lustre/include/lustre/lustre_idl.h | 133 +-
.../lustre/lustre/include/lustre/lustre_user.h | 2 +-
drivers/staging/lustre/lustre/include/lustre_dlm.h | 2 +-
.../staging/lustre/lustre/include/lustre_export.h | 4 +-
.../staging/lustre/lustre/include/lustre_import.h | 4 +-
drivers/staging/lustre/lustre/include/lustre_net.h | 18 +-
drivers/staging/lustre/lustre/include/obd.h | 2 +-
drivers/staging/lustre/lustre/include/obd_class.h | 17 +-
.../staging/lustre/lustre/include/obd_support.h | 22 +-
drivers/staging/lustre/lustre/lclient/lcommon_cl.c | 2 +-
drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 2 +-
drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 3 +-
drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 4 +-
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 17 +-
drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 8 +-
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 1 +
drivers/staging/lustre/lustre/libcfs/debug.c | 149 +-
drivers/staging/lustre/lustre/libcfs/fail.c | 2 +-
drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c | 2 +-
.../staging/lustre/lustre/libcfs/libcfs_string.c | 4 +-
.../lustre/lustre/libcfs/linux/linux-crypto.c | 4 +-
.../lustre/lustre/libcfs/linux/linux-module.c | 6 +-
drivers/staging/lustre/lustre/libcfs/module.c | 361 +-
drivers/staging/lustre/lustre/libcfs/tracefile.c | 12 -
drivers/staging/lustre/lustre/libcfs/tracefile.h | 33 +-
drivers/staging/lustre/lustre/llite/dcache.c | 3 +-
drivers/staging/lustre/lustre/llite/dir.c | 6 +-
drivers/staging/lustre/lustre/llite/file.c | 5 +-
drivers/staging/lustre/lustre/llite/llite_capa.c | 40 +-
.../staging/lustre/lustre/llite/llite_internal.h | 17 +-
drivers/staging/lustre/lustre/llite/llite_lib.c | 23 +-
drivers/staging/lustre/lustre/llite/lloop.c | 9 +-
drivers/staging/lustre/lustre/llite/namei.c | 12 +-
drivers/staging/lustre/lustre/llite/remote_perm.c | 17 +-
drivers/staging/lustre/lustre/llite/vvp_io.c | 5 +-
drivers/staging/lustre/lustre/llite/vvp_page.c | 27 +-
drivers/staging/lustre/lustre/llite/xattr_cache.c | 2 +-
drivers/staging/lustre/lustre/lmv/lmv_intent.c | 2 +-
drivers/staging/lustre/lustre/lmv/lmv_obd.c | 8 +-
drivers/staging/lustre/lustre/lov/lov_dev.c | 2 +-
drivers/staging/lustre/lustre/lov/lov_io.c | 2 +-
drivers/staging/lustre/lustre/lov/lov_merge.c | 1 +
drivers/staging/lustre/lustre/lov/lov_obd.c | 13 +-
drivers/staging/lustre/lustre/lov/lov_pool.c | 2 +-
drivers/staging/lustre/lustre/lov/lov_request.c | 18 +-
drivers/staging/lustre/lustre/mdc/mdc_lib.c | 2 +-
drivers/staging/lustre/lustre/mdc/mdc_request.c | 6 +-
drivers/staging/lustre/lustre/mgc/mgc_request.c | 6 +-
drivers/staging/lustre/lustre/obdclass/acl.c | 6 +-
drivers/staging/lustre/lustre/obdclass/cl_page.c | 30 +-
drivers/staging/lustre/lustre/obdclass/class_obd.c | 19 +-
drivers/staging/lustre/lustre/obdclass/genops.c | 22 +-
.../lustre/lustre/obdclass/linux/linux-module.c | 6 +-
.../lustre/lustre/obdclass/linux/linux-sysctl.c | 374 +-
drivers/staging/lustre/lustre/obdclass/llog.c | 8 +-
.../lustre/lustre/obdclass/lprocfs_status.c | 4 +-
drivers/staging/lustre/lustre/obdclass/lu_object.c | 8 +-
.../staging/lustre/lustre/obdclass/lustre_peer.c | 2 +-
.../staging/lustre/lustre/obdclass/obd_config.c | 17 +-
drivers/staging/lustre/lustre/obdclass/obd_mount.c | 24 +-
drivers/staging/lustre/lustre/obdclass/uuid.c | 34 +-
.../staging/lustre/lustre/obdecho/echo_client.c | 14 +-
drivers/staging/lustre/lustre/osc/osc_cache.c | 6 -
drivers/staging/lustre/lustre/osc/osc_dev.c | 2 +-
drivers/staging/lustre/lustre/osc/osc_page.c | 2 +-
drivers/staging/lustre/lustre/osc/osc_request.c | 8 +-
drivers/staging/lustre/lustre/ptlrpc/client.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/events.c | 4 +-
drivers/staging/lustre/lustre/ptlrpc/import.c | 6 +-
.../staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 8 +-
drivers/staging/lustre/lustre/ptlrpc/nrs.c | 2 +-
.../staging/lustre/lustre/ptlrpc/pack_generic.c | 2 +
drivers/staging/lustre/lustre/ptlrpc/pinger.c | 2 -
.../staging/lustre/lustre/ptlrpc/ptlrpc_module.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/sec_config.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/sec_plain.c | 2 +-
drivers/staging/lustre/lustre/ptlrpc/service.c | 18 +-
drivers/staging/lustre/sysfs-fs-lustre | 103 +
drivers/staging/media/bcm2048/radio-bcm2048.c | 20 +-
drivers/staging/media/lirc/lirc_imon.c | 10 +-
drivers/staging/media/lirc/lirc_sasem.c | 2 +-
drivers/staging/media/lirc/lirc_serial.c | 63 +-
drivers/staging/media/lirc/lirc_zilog.c | 2 +-
drivers/staging/media/mn88472/mn88472.c | 1 -
drivers/staging/media/mn88473/mn88473.c | 1 -
drivers/staging/media/omap4iss/Kconfig | 2 +-
drivers/staging/media/omap4iss/TODO | 1 -
drivers/staging/media/omap4iss/iss_video.c | 73 -
.../most/Documentation/ABI/sysfs-class-most.txt | 181 +
.../staging/most/Documentation/driver_usage.txt | 180 +
drivers/staging/most/Kconfig | 31 +
drivers/staging/most/Makefile | 8 +
drivers/staging/most/TODO | 8 +
drivers/staging/most/aim-cdev/Kconfig | 12 +
drivers/staging/most/aim-cdev/Makefile | 4 +
drivers/staging/most/aim-cdev/cdev.c | 528 +
drivers/staging/most/aim-network/Kconfig | 13 +
drivers/staging/most/aim-network/Makefile | 4 +
drivers/staging/most/aim-network/networking.c | 567 +
drivers/staging/most/aim-network/networking.h | 23 +
drivers/staging/most/aim-sound/Kconfig | 13 +
drivers/staging/most/aim-sound/Makefile | 4 +
drivers/staging/most/aim-sound/sound.c | 758 ++
drivers/staging/most/aim-v4l2/Kconfig | 12 +
drivers/staging/most/aim-v4l2/Makefile | 6 +
drivers/staging/most/aim-v4l2/video.c | 635 ++
drivers/staging/most/hdm-dim2/Kconfig | 17 +
drivers/staging/most/hdm-dim2/Makefile | 5 +
drivers/staging/most/hdm-dim2/dim2_errors.h | 67 +
drivers/staging/most/hdm-dim2/dim2_hal.c | 919 ++
drivers/staging/most/hdm-dim2/dim2_hal.h | 124 +
drivers/staging/most/hdm-dim2/dim2_hdm.c | 964 ++
drivers/staging/most/hdm-dim2/dim2_hdm.h | 26 +
drivers/staging/most/hdm-dim2/dim2_reg.h | 176 +
drivers/staging/most/hdm-dim2/dim2_sysfs.c | 116 +
drivers/staging/most/hdm-dim2/dim2_sysfs.h | 39 +
drivers/staging/most/hdm-i2c/Kconfig | 12 +
drivers/staging/most/hdm-i2c/Makefile | 3 +
drivers/staging/most/hdm-i2c/hdm_i2c.c | 451 +
drivers/staging/most/hdm-usb/Kconfig | 14 +
drivers/staging/most/hdm-usb/Makefile | 4 +
drivers/staging/most/hdm-usb/hdm_usb.c | 1454 +++
drivers/staging/most/mostcore/Kconfig | 14 +
drivers/staging/most/mostcore/Makefile | 3 +
drivers/staging/most/mostcore/core.c | 1932 ++++
drivers/staging/most/mostcore/mostcore.h | 316 +
drivers/staging/mt29f_spinand/mt29f_spinand.c | 1 -
drivers/staging/mt29f_spinand/mt29f_spinand.h | 4 +-
drivers/staging/netlogic/platform_net.c | 2 +-
drivers/staging/netlogic/xlr_net.h | 2 +-
drivers/staging/nvec/nvec.h | 19 +-
drivers/staging/octeon/ethernet-mdio.h | 2 +-
drivers/staging/octeon/ethernet-rgmii.c | 7 +-
drivers/staging/octeon/ethernet-rx.c | 133 +-
drivers/staging/octeon/ethernet-tx.c | 29 +-
drivers/staging/octeon/ethernet-util.h | 22 +-
drivers/staging/octeon/ethernet.c | 8 +-
drivers/staging/octeon/octeon-ethernet.h | 22 +-
drivers/staging/olpc_dcon/olpc_dcon.h | 2 +-
drivers/staging/ozwpan/Kconfig | 9 -
drivers/staging/ozwpan/Makefile | 16 -
drivers/staging/ozwpan/README | 25 -
drivers/staging/ozwpan/TODO | 14 -
drivers/staging/ozwpan/ozappif.h | 36 -
drivers/staging/ozwpan/ozcdev.c | 554 -
drivers/staging/ozwpan/ozcdev.h | 17 -
drivers/staging/ozwpan/ozdbg.h | 54 -
drivers/staging/ozwpan/ozeltbuf.c | 252 -
drivers/staging/ozwpan/ozeltbuf.h | 65 -
drivers/staging/ozwpan/ozhcd.c | 2301 ----
drivers/staging/ozwpan/ozhcd.h | 15 -
drivers/staging/ozwpan/ozmain.c | 71 -
drivers/staging/ozwpan/ozpd.c | 886 --
drivers/staging/ozwpan/ozpd.h | 134 -
drivers/staging/ozwpan/ozproto.c | 813 --
drivers/staging/ozwpan/ozproto.h | 62 -
drivers/staging/ozwpan/ozprotocol.h | 375 -
drivers/staging/ozwpan/ozurbparanoia.c | 54 -
drivers/staging/ozwpan/ozurbparanoia.h | 19 -
drivers/staging/ozwpan/ozusbif.h | 43 -
drivers/staging/ozwpan/ozusbsvc.c | 263 -
drivers/staging/ozwpan/ozusbsvc.h | 32 -
drivers/staging/ozwpan/ozusbsvc1.c | 471 -
drivers/staging/panel/panel.c | 12 +-
drivers/staging/rdma/Kconfig | 33 +
drivers/staging/rdma/Makefile | 5 +
drivers/staging/rdma/amso1100/Kbuild | 6 +
drivers/staging/rdma/amso1100/Kconfig | 15 +
drivers/staging/rdma/amso1100/TODO | 4 +
drivers/staging/rdma/amso1100/c2.c | 1241 +++
drivers/staging/rdma/amso1100/c2.h | 547 +
drivers/staging/rdma/amso1100/c2_ae.c | 327 +
drivers/staging/rdma/amso1100/c2_ae.h | 108 +
drivers/staging/rdma/amso1100/c2_alloc.c | 142 +
drivers/staging/rdma/amso1100/c2_cm.c | 461 +
drivers/staging/rdma/amso1100/c2_cq.c | 440 +
drivers/staging/rdma/amso1100/c2_intr.c | 219 +
drivers/staging/rdma/amso1100/c2_mm.c | 377 +
drivers/staging/rdma/amso1100/c2_mq.c | 174 +
drivers/staging/rdma/amso1100/c2_mq.h | 106 +
drivers/staging/rdma/amso1100/c2_pd.c | 90 +
drivers/staging/rdma/amso1100/c2_provider.c | 912 ++
drivers/staging/rdma/amso1100/c2_provider.h | 182 +
drivers/staging/rdma/amso1100/c2_qp.c | 1024 ++
drivers/staging/rdma/amso1100/c2_rnic.c | 655 ++
drivers/staging/rdma/amso1100/c2_status.h | 158 +
drivers/staging/rdma/amso1100/c2_user.h | 82 +
drivers/staging/rdma/amso1100/c2_vq.c | 260 +
drivers/staging/rdma/amso1100/c2_vq.h | 63 +
drivers/staging/rdma/amso1100/c2_wr.h | 1520 +++
drivers/staging/rdma/ehca/Kconfig | 10 +
drivers/staging/rdma/ehca/Makefile | 16 +
drivers/staging/rdma/ehca/TODO | 4 +
drivers/staging/rdma/ehca/ehca_av.c | 277 +
drivers/staging/rdma/ehca/ehca_classes.h | 482 +
drivers/staging/rdma/ehca/ehca_classes_pSeries.h | 208 +
drivers/staging/rdma/ehca/ehca_cq.c | 397 +
drivers/staging/rdma/ehca/ehca_eq.c | 189 +
drivers/staging/rdma/ehca/ehca_hca.c | 414 +
drivers/staging/rdma/ehca/ehca_irq.c | 870 ++
drivers/staging/rdma/ehca/ehca_irq.h | 77 +
drivers/staging/rdma/ehca/ehca_iverbs.h | 218 +
drivers/staging/rdma/ehca/ehca_main.c | 1123 ++
drivers/staging/rdma/ehca/ehca_mcast.c | 131 +
drivers/staging/rdma/ehca/ehca_mrmw.c | 2593 +++++
drivers/staging/rdma/ehca/ehca_mrmw.h | 132 +
drivers/staging/rdma/ehca/ehca_pd.c | 124 +
drivers/staging/rdma/ehca/ehca_qes.h | 260 +
drivers/staging/rdma/ehca/ehca_qp.c | 2257 ++++
drivers/staging/rdma/ehca/ehca_reqs.c | 953 ++
drivers/staging/rdma/ehca/ehca_sqp.c | 245 +
drivers/staging/rdma/ehca/ehca_tools.h | 155 +
drivers/staging/rdma/ehca/ehca_uverbs.c | 309 +
drivers/staging/rdma/ehca/hcp_if.c | 949 ++
drivers/staging/rdma/ehca/hcp_if.h | 265 +
drivers/staging/rdma/ehca/hcp_phyp.c | 82 +
drivers/staging/rdma/ehca/hcp_phyp.h | 90 +
drivers/staging/rdma/ehca/hipz_fns.h | 68 +
drivers/staging/rdma/ehca/hipz_fns_core.h | 100 +
drivers/staging/rdma/ehca/hipz_hw.h | 414 +
drivers/staging/rdma/ehca/ipz_pt_fn.c | 289 +
drivers/staging/rdma/ehca/ipz_pt_fn.h | 289 +
drivers/staging/rdma/hfi1/Kconfig | 37 +
drivers/staging/rdma/hfi1/Makefile | 19 +
drivers/staging/rdma/hfi1/TODO | 6 +
drivers/staging/rdma/hfi1/chip.c | 10798 +++++++++++++++++++
drivers/staging/rdma/hfi1/chip.h | 1035 ++
drivers/staging/rdma/hfi1/chip_registers.h | 1292 +++
drivers/staging/rdma/hfi1/common.h | 415 +
drivers/staging/rdma/hfi1/cq.c | 558 +
drivers/staging/rdma/hfi1/debugfs.c | 899 ++
drivers/staging/rdma/hfi1/debugfs.h | 78 +
drivers/staging/rdma/hfi1/device.c | 184 +
drivers/staging/rdma/hfi1/device.h | 62 +
drivers/staging/rdma/hfi1/diag.c | 1873 ++++
drivers/staging/rdma/hfi1/dma.c | 186 +
drivers/staging/rdma/hfi1/driver.c | 1241 +++
drivers/staging/rdma/hfi1/eprom.c | 475 +
drivers/staging/rdma/hfi1/eprom.h | 55 +
drivers/staging/rdma/hfi1/file_ops.c | 2144 ++++
drivers/staging/rdma/hfi1/firmware.c | 1620 +++
drivers/staging/rdma/hfi1/hfi.h | 1821 ++++
drivers/staging/rdma/hfi1/init.c | 1722 +++
drivers/staging/rdma/hfi1/intr.c | 207 +
drivers/staging/rdma/hfi1/iowait.h | 186 +
drivers/staging/rdma/hfi1/keys.c | 411 +
drivers/staging/rdma/hfi1/mad.c | 4257 ++++++++
drivers/staging/rdma/hfi1/mad.h | 325 +
drivers/staging/rdma/hfi1/mmap.c | 192 +
drivers/staging/rdma/hfi1/mr.c | 551 +
drivers/staging/rdma/hfi1/opa_compat.h | 129 +
drivers/staging/rdma/hfi1/pcie.c | 1253 +++
drivers/staging/rdma/hfi1/pio.c | 1771 +++
drivers/staging/rdma/hfi1/pio.h | 224 +
drivers/staging/rdma/hfi1/pio_copy.c | 858 ++
drivers/staging/rdma/hfi1/platform_config.h | 286 +
drivers/staging/rdma/hfi1/qp.c | 1687 +++
drivers/staging/rdma/hfi1/qp.h | 235 +
drivers/staging/rdma/hfi1/qsfp.c | 546 +
drivers/staging/rdma/hfi1/qsfp.h | 222 +
drivers/staging/rdma/hfi1/rc.c | 2426 +++++
drivers/staging/rdma/hfi1/ruc.c | 948 ++
drivers/staging/rdma/hfi1/sdma.c | 2962 +++++
drivers/staging/rdma/hfi1/sdma.h | 1123 ++
drivers/staging/rdma/hfi1/srq.c | 397 +
drivers/staging/rdma/hfi1/sysfs.c | 739 ++
drivers/staging/rdma/hfi1/trace.c | 221 +
drivers/staging/rdma/hfi1/trace.h | 1409 +++
drivers/staging/rdma/hfi1/twsi.c | 518 +
drivers/staging/rdma/hfi1/twsi.h | 68 +
drivers/staging/rdma/hfi1/uc.c | 585 +
drivers/staging/rdma/hfi1/ud.c | 885 ++
drivers/staging/rdma/hfi1/user_pages.c | 156 +
drivers/staging/rdma/hfi1/user_sdma.c | 1444 +++
drivers/staging/rdma/hfi1/user_sdma.h | 89 +
drivers/staging/rdma/hfi1/verbs.c | 2145 ++++
drivers/staging/rdma/hfi1/verbs.h | 1151 ++
drivers/staging/rdma/hfi1/verbs_mcast.c | 385 +
drivers/staging/rdma/ipath/Kconfig | 16 +
drivers/staging/rdma/ipath/Makefile | 37 +
drivers/staging/rdma/ipath/TODO | 5 +
drivers/staging/rdma/ipath/ipath_common.h | 851 ++
drivers/staging/rdma/ipath/ipath_cq.c | 483 +
drivers/staging/rdma/ipath/ipath_debug.h | 99 +
drivers/staging/rdma/ipath/ipath_diag.c | 551 +
drivers/staging/rdma/ipath/ipath_dma.c | 179 +
drivers/staging/rdma/ipath/ipath_driver.c | 2789 +++++
drivers/staging/rdma/ipath/ipath_eeprom.c | 1183 ++
drivers/staging/rdma/ipath/ipath_file_ops.c | 2620 +++++
drivers/staging/rdma/ipath/ipath_fs.c | 422 +
drivers/staging/rdma/ipath/ipath_iba6110.c | 1940 ++++
drivers/staging/rdma/ipath/ipath_init_chip.c | 1066 ++
drivers/staging/rdma/ipath/ipath_intr.c | 1273 +++
drivers/staging/rdma/ipath/ipath_kernel.h | 1373 +++
drivers/staging/rdma/ipath/ipath_keys.c | 270 +
drivers/staging/rdma/ipath/ipath_mad.c | 1521 +++
drivers/staging/rdma/ipath/ipath_mmap.c | 174 +
drivers/staging/rdma/ipath/ipath_mr.c | 425 +
drivers/staging/rdma/ipath/ipath_qp.c | 1080 ++
drivers/staging/rdma/ipath/ipath_rc.c | 1969 ++++
drivers/staging/rdma/ipath/ipath_registers.h | 512 +
drivers/staging/rdma/ipath/ipath_ruc.c | 734 ++
drivers/staging/rdma/ipath/ipath_sdma.c | 818 ++
drivers/staging/rdma/ipath/ipath_srq.c | 380 +
drivers/staging/rdma/ipath/ipath_stats.c | 347 +
drivers/staging/rdma/ipath/ipath_sysfs.c | 1238 +++
drivers/staging/rdma/ipath/ipath_uc.c | 547 +
drivers/staging/rdma/ipath/ipath_ud.c | 580 +
drivers/staging/rdma/ipath/ipath_user_pages.c | 229 +
drivers/staging/rdma/ipath/ipath_user_sdma.c | 875 ++
drivers/staging/rdma/ipath/ipath_user_sdma.h | 52 +
drivers/staging/rdma/ipath/ipath_verbs.c | 2365 ++++
drivers/staging/rdma/ipath/ipath_verbs.h | 939 ++
drivers/staging/rdma/ipath/ipath_verbs_mcast.c | 364 +
drivers/staging/rdma/ipath/ipath_wc_ppc64.c | 49 +
drivers/staging/rdma/ipath/ipath_wc_x86_64.c | 144 +
drivers/staging/rtl8188eu/core/rtw_ap.c | 5 +-
drivers/staging/rtl8188eu/core/rtw_debug.c | 6 +-
drivers/staging/rtl8188eu/core/rtw_efuse.c | 2 +-
drivers/staging/rtl8188eu/core/rtw_ieee80211.c | 4 +-
drivers/staging/rtl8188eu/core/rtw_ioctl_set.c | 10 +-
drivers/staging/rtl8188eu/core/rtw_mlme.c | 43 +-
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c | 10 +-
drivers/staging/rtl8188eu/core/rtw_pwrctrl.c | 8 +-
drivers/staging/rtl8188eu/core/rtw_recv.c | 44 +-
drivers/staging/rtl8188eu/core/rtw_security.c | 2 +-
drivers/staging/rtl8188eu/core/rtw_wlan_util.c | 2 +
drivers/staging/rtl8188eu/core/rtw_xmit.c | 36 +-
.../staging/rtl8188eu/hal/Hal8188ERateAdaptive.c | 8 +-
drivers/staging/rtl8188eu/hal/bb_cfg.c | 6 +-
drivers/staging/rtl8188eu/hal/hal_com.c | 27 +-
drivers/staging/rtl8188eu/hal/hal_intf.c | 32 -
drivers/staging/rtl8188eu/hal/odm.c | 11 +-
drivers/staging/rtl8188eu/hal/rf.c | 2 +-
drivers/staging/rtl8188eu/hal/rf_cfg.c | 2 +-
drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c | 21 -
drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c | 33 +-
drivers/staging/rtl8188eu/hal/usb_halinit.c | 99 +-
drivers/staging/rtl8188eu/include/HalVerDef.h | 84 -
drivers/staging/rtl8188eu/include/hal_intf.h | 16 -
drivers/staging/rtl8188eu/include/ieee80211.h | 54 -
drivers/staging/rtl8188eu/include/osdep_service.h | 2 +-
drivers/staging/rtl8188eu/include/recv_osdep.h | 3 +-
drivers/staging/rtl8188eu/include/rtl8188e_cmd.h | 1 -
drivers/staging/rtl8188eu/include/rtl8188e_hal.h | 8 -
drivers/staging/rtl8188eu/include/rtw_mlme.h | 3 +-
drivers/staging/rtl8188eu/include/rtw_mlme_ext.h | 8 +-
drivers/staging/rtl8188eu/include/rtw_pwrctrl.h | 1 -
drivers/staging/rtl8188eu/include/sta_info.h | 19 +-
drivers/staging/rtl8188eu/include/wifi.h | 7 -
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 22 +-
drivers/staging/rtl8188eu/os_dep/os_intfs.c | 2 +-
drivers/staging/rtl8188eu/os_dep/recv_linux.c | 16 +-
drivers/staging/rtl8188eu/os_dep/usb_intf.c | 79 +-
drivers/staging/rtl8192e/dot11d.c | 39 -
drivers/staging/rtl8192e/dot11d.h | 6 -
drivers/staging/rtl8192e/rtl8192e/r8190P_def.h | 46 -
drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c | 192 +-
drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h | 11 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c | 11 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h | 5 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 440 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h | 58 +-
.../staging/rtl8192e/rtl8192e/r8192E_firmware.c | 20 +-
.../staging/rtl8192e/rtl8192e/r8192E_firmware.h | 6 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h | 8 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c | 448 +-
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h | 66 +-
drivers/staging/rtl8192e/rtl8192e/rtl_cam.c | 125 +-
drivers/staging/rtl8192e/rtl8192e/rtl_cam.h | 16 +-
drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 611 +-
drivers/staging/rtl8192e/rtl8192e/rtl_core.h | 112 +-
drivers/staging/rtl8192e/rtl8192e/rtl_dm.c | 472 +-
drivers/staging/rtl8192e/rtl8192e/rtl_dm.h | 44 +-
drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c | 130 +-
drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h | 2 +-
drivers/staging/rtl8192e/rtl8192e/rtl_pci.c | 2 +-
drivers/staging/rtl8192e/rtl8192e/rtl_pci.h | 2 +-
drivers/staging/rtl8192e/rtl8192e/rtl_pm.c | 24 +-
drivers/staging/rtl8192e/rtl8192e/rtl_pm.h | 4 +-
drivers/staging/rtl8192e/rtl8192e/rtl_ps.c | 53 +-
drivers/staging/rtl8192e/rtl8192e/rtl_ps.h | 19 +-
drivers/staging/rtl8192e/rtl8192e/rtl_wx.c | 132 +-
drivers/staging/rtl8192e/rtl819x_BA.h | 5 -
drivers/staging/rtl8192e/rtl819x_BAProc.c | 5 +-
drivers/staging/rtl8192e/rtl819x_HT.h | 2 -
drivers/staging/rtl8192e/rtl819x_HTProc.c | 5 +-
drivers/staging/rtl8192e/rtl819x_Qos.h | 5 -
drivers/staging/rtl8192e/rtl819x_TS.h | 2 -
drivers/staging/rtl8192e/rtl819x_TSProc.c | 2 +-
drivers/staging/rtl8192e/rtllib.h | 439 +-
drivers/staging/rtl8192e/rtllib_debug.h | 8 -
drivers/staging/rtl8192e/rtllib_rx.c | 91 +-
drivers/staging/rtl8192e/rtllib_softmac.c | 30 +-
drivers/staging/rtl8192e/rtllib_tx.c | 9 +-
drivers/staging/rtl8192u/ieee80211/ieee80211.h | 345 +-
drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 4 +-
.../staging/rtl8192u/ieee80211/ieee80211_softmac.c | 6 +-
.../rtl8192u/ieee80211/ieee80211_softmac_wx.c | 2 +-
drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c | 4 +-
.../staging/rtl8192u/ieee80211/rtl819x_BAProc.c | 17 +-
.../staging/rtl8192u/ieee80211/rtl819x_HTProc.c | 16 +-
.../staging/rtl8192u/ieee80211/rtl819x_TSProc.c | 4 +-
drivers/staging/rtl8192u/r8190_rtl8256.h | 11 +-
drivers/staging/rtl8192u/r8192U.h | 2 +-
drivers/staging/rtl8192u/r8192U_core.c | 14 +-
drivers/staging/rtl8192u/r8192U_dm.c | 8 +-
drivers/staging/rtl8192u/r8192U_dm.h | 36 +-
drivers/staging/rtl8192u/r8192U_wx.h | 2 +-
drivers/staging/rtl8192u/r819xU_cmdpkt.h | 8 +-
drivers/staging/rtl8192u/r819xU_firmware.c | 15 +-
drivers/staging/rtl8192u/r819xU_phy.h | 55 +-
drivers/staging/rtl8712/ieee80211.c | 25 +-
drivers/staging/rtl8712/rtl8712_recv.c | 4 +-
drivers/staging/rtl8712/rtl871x_cmd.c | 28 +-
drivers/staging/rtl8712/rtl871x_cmd.h | 18 -
drivers/staging/rtl8712/rtl871x_event.h | 2 +-
drivers/staging/rtl8712/rtl871x_ioctl.h | 28 +-
drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 34 +-
drivers/staging/rtl8712/rtl871x_mlme.c | 47 +-
drivers/staging/rtl8712/rtl871x_mlme.h | 2 +-
drivers/staging/rtl8712/rtl871x_mp_ioctl.c | 6 +-
drivers/staging/rtl8712/rtl871x_security.c | 35 +-
drivers/staging/rtl8712/usb_intf.c | 1 +
drivers/staging/rtl8712/wlan_bssdef.h | 42 +-
drivers/staging/rtl8723au/core/rtw_recv.c | 3 +-
drivers/staging/rtl8723au/core/rtw_security.c | 24 +-
drivers/staging/rtl8723au/hal/odm.c | 2 +-
drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c | 2 +-
drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c | 4 +-
drivers/staging/rts5208/ms.c | 5 +-
drivers/staging/rts5208/sd.c | 21 +-
drivers/staging/slicoss/slicoss.c | 1 +
drivers/staging/sm750fb/ddk750_chip.c | 15 +-
drivers/staging/sm750fb/ddk750_chip.h | 12 +-
drivers/staging/sm750fb/ddk750_display.c | 170 +-
drivers/staging/sm750fb/ddk750_display.h | 11 +-
drivers/staging/sm750fb/ddk750_dvi.c | 75 +-
drivers/staging/sm750fb/ddk750_dvi.h | 3 +-
drivers/staging/sm750fb/ddk750_help.c | 8 +-
drivers/staging/sm750fb/ddk750_help.h | 4 +-
drivers/staging/sm750fb/ddk750_hwi2c.c | 244 +-
drivers/staging/sm750fb/ddk750_mode.c | 164 +-
drivers/staging/sm750fb/ddk750_mode.h | 52 +-
drivers/staging/sm750fb/ddk750_power.c | 261 +-
drivers/staging/sm750fb/ddk750_power.h | 11 +-
drivers/staging/sm750fb/ddk750_reg.h | 18 +-
drivers/staging/sm750fb/ddk750_sii164.c | 389 +-
drivers/staging/sm750fb/ddk750_sii164.h | 31 +-
drivers/staging/sm750fb/sm750.c | 160 +-
drivers/staging/sm750fb/sm750.h | 72 +-
drivers/staging/sm750fb/sm750_accel.c | 381 +-
drivers/staging/sm750fb/sm750_accel.h | 4 +-
drivers/staging/sm750fb/sm750_cursor.c | 55 +-
drivers/staging/sm750fb/sm750_cursor.h | 4 +-
drivers/staging/sm750fb/sm750_help.h | 28 +-
drivers/staging/sm750fb/sm750_hw.c | 414 +-
drivers/staging/sm750fb/sm750_hw.h | 48 +-
drivers/staging/sm7xxfb/Kconfig | 13 -
drivers/staging/sm7xxfb/Makefile | 1 -
drivers/staging/sm7xxfb/TODO | 12 -
drivers/staging/sm7xxfb/sm7xx.h | 117 -
drivers/staging/sm7xxfb/sm7xxfb.c | 1686 ---
drivers/staging/speakup/buffers.c | 3 +-
drivers/staging/speakup/i18n.c | 3 +-
drivers/staging/speakup/i18n.h | 12 +-
drivers/staging/speakup/keyhelp.c | 2 +-
drivers/staging/speakup/kobjects.c | 3 +-
drivers/staging/speakup/main.c | 15 +-
drivers/staging/speakup/selection.c | 3 +-
drivers/staging/speakup/serialio.c | 10 +-
drivers/staging/speakup/speakup.h | 68 +-
drivers/staging/speakup/speakup_acnt.h | 8 +-
drivers/staging/speakup/speakup_decpc.c | 6 +-
drivers/staging/speakup/speakup_dtlk.h | 52 +-
drivers/staging/speakup/speakup_soft.c | 1 -
drivers/staging/speakup/thread.c | 3 +-
drivers/staging/speakup/varhandlers.c | 3 +-
drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c | 1 -
drivers/staging/unisys/Kconfig | 2 +-
drivers/staging/unisys/include/channel_guid.h | 24 +-
drivers/staging/unisys/include/visorbus.h | 5 +-
drivers/staging/unisys/visorbus/Makefile | 1 -
drivers/staging/unisys/visorbus/controlvmchannel.h | 9 +-
.../unisys/visorbus/controlvmcompletionstatus.h | 9 +-
drivers/staging/unisys/visorbus/iovmcall_gnuc.h | 9 +-
drivers/staging/unisys/visorbus/periodic_work.c | 9 +-
drivers/staging/unisys/visorbus/vbuschannel.h | 9 +-
drivers/staging/unisys/visorbus/vbusdeviceinfo.h | 9 +-
drivers/staging/unisys/visorbus/visorbus_main.c | 122 +-
drivers/staging/unisys/visorbus/visorbus_private.h | 9 +-
drivers/staging/unisys/visorbus/visorchannel.c | 59 +-
drivers/staging/unisys/visorbus/visorchipset.c | 36 +-
drivers/staging/unisys/visorbus/vmcallinterface.h | 9 +-
drivers/staging/unisys/visornic/visornic_main.c | 1140 +-
drivers/staging/vme/devices/vme_pio2_core.c | 18 +-
drivers/staging/vme/devices/vme_user.c | 168 +-
drivers/staging/vt6655/baseband.c | 6 +-
drivers/staging/vt6655/card.c | 15 +-
drivers/staging/vt6655/desc.h | 146 +-
drivers/staging/vt6655/device.h | 13 +-
drivers/staging/vt6655/device_cfg.h | 15 -
drivers/staging/vt6655/device_main.c | 146 +-
drivers/staging/vt6655/dpc.c | 2 +-
drivers/staging/vt6655/mac.c | 18 -
drivers/staging/vt6655/power.c | 16 +-
drivers/staging/vt6655/rf.c | 532 +-
drivers/staging/vt6655/rf.h | 24 +-
drivers/staging/vt6655/rxtx.c | 23 +-
drivers/staging/vt6655/rxtx.h | 4 +-
drivers/staging/vt6655/upc.h | 36 +-
drivers/staging/vt6656/rxtx.c | 7 +-
drivers/staging/wilc1000/Kconfig | 28 +-
drivers/staging/wilc1000/Makefile | 8 +-
drivers/staging/wilc1000/coreconfigsimulator.h | 17 -
drivers/staging/wilc1000/coreconfigurator.c | 225 +-
drivers/staging/wilc1000/coreconfigurator.h | 37 +-
drivers/staging/wilc1000/fifo_buffer.c | 133 -
drivers/staging/wilc1000/fifo_buffer.h | 26 -
drivers/staging/wilc1000/host_interface.c | 1433 ++-
drivers/staging/wilc1000/host_interface.h | 115 +-
drivers/staging/wilc1000/linux_mon.c | 25 +-
drivers/staging/wilc1000/linux_wlan.c | 248 +-
drivers/staging/wilc1000/linux_wlan_common.h | 4 +-
drivers/staging/wilc1000/linux_wlan_sdio.c | 1 -
drivers/staging/wilc1000/wilc_debugfs.c | 30 +-
drivers/staging/wilc1000/wilc_exported_buf.c | 13 +-
drivers/staging/wilc1000/wilc_log.h | 47 -
drivers/staging/wilc1000/wilc_memory.c | 46 +-
drivers/staging/wilc1000/wilc_memory.h | 173 -
drivers/staging/wilc1000/wilc_msgqueue.c | 28 +-
drivers/staging/wilc1000/wilc_msgqueue.h | 35 +-
drivers/staging/wilc1000/wilc_osconfig.h | 9 -
drivers/staging/wilc1000/wilc_oswrapper.h | 12 -
drivers/staging/wilc1000/wilc_platform.h | 8 +-
drivers/staging/wilc1000/wilc_sdio.c | 288 +-
drivers/staging/wilc1000/wilc_sleep.c | 18 -
drivers/staging/wilc1000/wilc_sleep.h | 20 -
drivers/staging/wilc1000/wilc_spi.c | 15 +-
drivers/staging/wilc1000/wilc_strutils.c | 80 -
drivers/staging/wilc1000/wilc_strutils.h | 134 -
drivers/staging/wilc1000/wilc_timer.c | 45 -
drivers/staging/wilc1000/wilc_timer.h | 129 -
drivers/staging/wilc1000/wilc_type.h | 34 -
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 321 +-
drivers/staging/wilc1000/wilc_wfi_cfgoperations.h | 2 +-
drivers/staging/wilc1000/wilc_wfi_netdevice.c | 951 --
drivers/staging/wilc1000/wilc_wfi_netdevice.h | 16 +-
drivers/staging/wilc1000/wilc_wlan.c | 53 +-
drivers/staging/wilc1000/wilc_wlan.h | 2 +-
drivers/staging/wilc1000/wilc_wlan_cfg.c | 16 +-
drivers/staging/wilc1000/wilc_wlan_if.h | 6 +-
drivers/staging/xgifb/Makefile | 2 +-
drivers/staging/xgifb/XGI_main_26.c | 12 +-
drivers/staging/xgifb/vb_init.h | 4 +-
drivers/staging/xgifb/vb_setmode.h | 34 +-
drivers/staging/xgifb/vb_util.c | 42 -
drivers/staging/xgifb/vb_util.h | 44 +-
drivers/target/iscsi/iscsi_target.c | 89 +-
drivers/target/iscsi/iscsi_target.h | 4 +-
drivers/target/iscsi/iscsi_target_configfs.c | 24 +-
drivers/target/iscsi/iscsi_target_device.c | 10 +-
drivers/target/iscsi/iscsi_target_login.c | 78 +-
drivers/target/iscsi/iscsi_target_login.h | 4 +-
drivers/target/iscsi/iscsi_target_nego.c | 7 +-
drivers/target/iscsi/iscsi_target_stat.c | 2 +-
drivers/target/iscsi/iscsi_target_tmr.c | 2 +-
drivers/target/iscsi/iscsi_target_tpg.c | 31 +-
drivers/target/iscsi/iscsi_target_tpg.h | 3 +-
drivers/target/iscsi/iscsi_target_util.c | 39 +-
drivers/target/loopback/tcm_loop.c | 22 +-
drivers/target/target_core_device.c | 51 +-
drivers/target/target_core_fabric_configfs.c | 2 +-
drivers/target/target_core_hba.c | 5 +
drivers/target/target_core_iblock.c | 23 +-
drivers/target/target_core_pscsi.c | 6 +-
drivers/target/target_core_rd.c | 44 -
drivers/target/target_core_sbc.c | 49 +-
drivers/target/target_core_spc.c | 55 +-
drivers/target/target_core_tpg.c | 22 +-
drivers/target/target_core_transport.c | 507 +-
drivers/target/target_core_user.c | 14 +-
drivers/target/tcm_fc/tfc_cmd.c | 2 +-
drivers/thermal/Kconfig | 25 +-
drivers/thermal/Makefile | 1 +
drivers/thermal/armada_thermal.c | 2 +-
drivers/thermal/db8500_cpufreq_cooling.c | 1 +
drivers/thermal/db8500_thermal.c | 7 +-
drivers/thermal/dove_thermal.c | 2 +-
drivers/thermal/fair_share.c | 2 +-
drivers/thermal/gov_bang_bang.c | 5 +-
drivers/thermal/hisi_thermal.c | 4 +-
drivers/thermal/imx_thermal.c | 27 +-
drivers/thermal/int340x_thermal/int3400_thermal.c | 2 +-
.../thermal/int340x_thermal/int340x_thermal_zone.c | 10 +-
.../thermal/int340x_thermal/int340x_thermal_zone.h | 8 +-
.../int340x_thermal/processor_thermal_device.c | 4 +-
drivers/thermal/intel_pch_thermal.c | 283 +
drivers/thermal/intel_powerclamp.c | 7 +-
drivers/thermal/intel_quark_dts_thermal.c | 13 +-
drivers/thermal/intel_soc_dts_iosf.c | 8 +-
drivers/thermal/kirkwood_thermal.c | 2 +-
drivers/thermal/of-thermal.c | 14 +-
drivers/thermal/power_allocator.c | 267 +-
drivers/thermal/qcom-spmi-temp-alarm.c | 2 +-
drivers/thermal/rcar_thermal.c | 7 +-
drivers/thermal/rockchip_thermal.c | 10 +-
drivers/thermal/samsung/exynos_tmu.c | 25 +-
drivers/thermal/spear_thermal.c | 2 +-
drivers/thermal/st/st_thermal.c | 7 +-
drivers/thermal/step_wise.c | 4 +-
drivers/thermal/tegra_soctherm.c | 4 +-
drivers/thermal/thermal_core.c | 137 +-
drivers/thermal/thermal_hwmon.c | 10 +-
drivers/thermal/ti-soc-thermal/Kconfig | 8 +-
drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 10 +-
drivers/thermal/x86_pkg_temp_thermal.c | 10 +-
drivers/thunderbolt/nhi.c | 2 +-
drivers/tty/hvc/hvc_console.c | 2 +-
drivers/tty/hvc/hvc_xen.c | 20 +-
drivers/tty/hvc/hvsi.c | 46 +-
drivers/tty/mips_ejtag_fdc.c | 44 +-
drivers/tty/n_gsm.c | 2 +-
drivers/tty/n_tty.c | 17 +-
drivers/tty/pty.c | 8 +
drivers/tty/serial/8250/8250.h | 17 +-
drivers/tty/serial/8250/8250_core.c | 3270 +----
drivers/tty/serial/8250/8250_dma.c | 4 -
drivers/tty/serial/8250/8250_dw.c | 27 +-
drivers/tty/serial/8250/8250_early.c | 4 +-
drivers/tty/serial/8250/8250_fintek.c | 172 +-
drivers/tty/serial/8250/8250_ingenic.c | 1 -
drivers/tty/serial/8250/8250_omap.c | 133 +-
drivers/tty/serial/8250/8250_pci.c | 82 +
drivers/tty/serial/8250/8250_port.c | 2922 +++++
drivers/tty/serial/8250/Makefile | 5 +-
drivers/tty/serial/Kconfig | 6 +-
drivers/tty/serial/atmel_serial.c | 484 +-
drivers/tty/serial/etraxfs-uart.c | 59 +-
drivers/tty/serial/imx.c | 183 +-
drivers/tty/serial/lantiq.c | 8 +-
drivers/tty/serial/men_z135_uart.c | 1 -
drivers/tty/serial/mpc52xx_uart.c | 5 +-
drivers/tty/serial/mxs-auart.c | 58 +-
drivers/tty/serial/samsung.c | 1 -
drivers/tty/serial/sc16is7xx.c | 113 +-
drivers/tty/serial/serial_core.c | 25 +-
drivers/tty/serial/sirfsoc_uart.c | 126 +-
drivers/tty/serial/sirfsoc_uart.h | 5 +-
drivers/tty/serial/sn_console.c | 32 +-
drivers/tty/serial/stm32-usart.c | 2 +-
drivers/tty/serial/suncore.c | 11 +-
drivers/tty/serial/sunhv.c | 13 +-
drivers/tty/serial/ucc_uart.c | 2 +-
drivers/tty/sysrq.c | 11 +-
drivers/tty/tty_audit.c | 2 +-
drivers/tty/tty_buffer.c | 34 +-
drivers/tty/tty_io.c | 117 +-
drivers/tty/tty_ioctl.c | 15 +-
drivers/tty/tty_ldisc.c | 15 +-
drivers/uio/Kconfig | 2 +-
drivers/uio/uio.c | 1 +
drivers/uio/uio_fsl_elbc_gpcm.c | 14 +-
drivers/usb/atm/cxacru.c | 7 +-
drivers/usb/chipidea/bits.h | 12 +
drivers/usb/chipidea/ci.h | 9 +-
drivers/usb/chipidea/ci_hdrc_imx.c | 148 +-
drivers/usb/chipidea/ci_hdrc_usb2.c | 25 +-
drivers/usb/chipidea/core.c | 128 +-
drivers/usb/chipidea/debug.c | 9 +
drivers/usb/chipidea/host.c | 26 +-
drivers/usb/chipidea/otg_fsm.c | 1 -
drivers/usb/chipidea/udc.c | 47 +-
drivers/usb/chipidea/usbmisc_imx.c | 12 +-
drivers/usb/class/usblp.c | 68 +-
drivers/usb/common/common.c | 56 +
drivers/usb/core/devio.c | 19 +-
drivers/usb/core/driver.c | 1 +
drivers/usb/core/endpoint.c | 2 +-
drivers/usb/core/hcd.c | 19 +-
drivers/usb/core/hub.c | 70 +-
drivers/usb/core/otg_whitelist.h | 2 +-
drivers/usb/core/sysfs.c | 31 +
drivers/usb/dwc2/core.c | 2 +-
drivers/usb/dwc2/gadget.c | 15 +-
drivers/usb/dwc3/Kconfig | 7 -
drivers/usb/dwc3/Makefile | 2 -
drivers/usb/dwc3/core.c | 18 +-
drivers/usb/dwc3/core.h | 22 +
drivers/usb/dwc3/dwc3-exynos.c | 2 +-
drivers/usb/dwc3/dwc3-keystone.c | 2 +-
drivers/usb/dwc3/dwc3-omap.c | 79 +-
drivers/usb/dwc3/dwc3-pci.c | 52 +-
drivers/usb/dwc3/dwc3-qcom.c | 4 +-
drivers/usb/dwc3/dwc3-st.c | 4 -
drivers/usb/dwc3/ep0.c | 92 +-
drivers/usb/dwc3/gadget.c | 122 +-
drivers/usb/dwc3/platform_data.h | 1 +
drivers/usb/gadget/composite.c | 41 +-
drivers/usb/gadget/config.c | 56 +
drivers/usb/gadget/configfs.c | 29 +-
drivers/usb/gadget/epautoconf.c | 281 +-
drivers/usb/gadget/function/f_acm.c | 1 -
drivers/usb/gadget/function/f_ecm.c | 4 +-
drivers/usb/gadget/function/f_fs.c | 8 +-
drivers/usb/gadget/function/f_loopback.c | 5 -
drivers/usb/gadget/function/f_mass_storage.c | 164 +-
drivers/usb/gadget/function/f_mass_storage.h | 6 +-
drivers/usb/gadget/function/f_midi.c | 4 +
drivers/usb/gadget/function/f_ncm.c | 5 +-
drivers/usb/gadget/function/f_obex.c | 22 +-
drivers/usb/gadget/function/f_printer.c | 7 +-
drivers/usb/gadget/function/f_serial.c | 1 -
drivers/usb/gadget/function/f_sourcesink.c | 6 -
drivers/usb/gadget/function/f_uac2.c | 2 +-
drivers/usb/gadget/function/f_uvc.c | 7 +-
drivers/usb/gadget/function/storage_common.h | 2 +-
drivers/usb/gadget/function/u_ether.h | 4 +-
drivers/usb/gadget/function/u_serial.c | 5 +
drivers/usb/gadget/function/u_uac1.h | 2 -
drivers/usb/gadget/legacy/Kconfig | 2 +
drivers/usb/gadget/legacy/acm_ms.c | 41 +-
drivers/usb/gadget/legacy/audio.c | 41 +-
drivers/usb/gadget/legacy/cdc2.c | 35 +-
drivers/usb/gadget/legacy/dbgp.c | 10 +-
drivers/usb/gadget/legacy/ether.c | 36 +-
drivers/usb/gadget/legacy/g_ffs.c | 32 +-
drivers/usb/gadget/legacy/gmidi.c | 8 +-
drivers/usb/gadget/legacy/hid.c | 37 +-
drivers/usb/gadget/legacy/mass_storage.c | 41 +-
drivers/usb/gadget/legacy/multi.c | 43 +-
drivers/usb/gadget/legacy/ncm.c | 34 +-
drivers/usb/gadget/legacy/nokia.c | 105 +-
drivers/usb/gadget/legacy/printer.c | 51 +-
drivers/usb/gadget/legacy/serial.c | 38 +-
drivers/usb/gadget/legacy/zero.c | 41 +-
drivers/usb/gadget/udc/amd5536udc.c | 131 +-
drivers/usb/gadget/udc/at91_udc.c | 39 +-
drivers/usb/gadget/udc/atmel_usba_udc.c | 29 +-
drivers/usb/gadget/udc/bcm63xx_udc.c | 29 +-
drivers/usb/gadget/udc/bdc/bdc.h | 2 +-
drivers/usb/gadget/udc/bdc/bdc_core.c | 3 +-
drivers/usb/gadget/udc/bdc/bdc_ep.c | 13 +-
drivers/usb/gadget/udc/dummy_hcd.c | 141 +-
drivers/usb/gadget/udc/fotg210-udc.c | 29 +-
drivers/usb/gadget/udc/fsl_qe_udc.c | 11 +
drivers/usb/gadget/udc/fsl_udc_core.c | 13 +
drivers/usb/gadget/udc/fusb300_udc.c | 11 +
drivers/usb/gadget/udc/gadget_chips.h | 55 -
drivers/usb/gadget/udc/goku_udc.c | 38 +
drivers/usb/gadget/udc/gr_udc.c | 14 +-
drivers/usb/gadget/udc/lpc32xx_udc.c | 32 +
drivers/usb/gadget/udc/m66592-udc.c | 11 +
drivers/usb/gadget/udc/mv_u3d_core.c | 12 +-
drivers/usb/gadget/udc/mv_udc_core.c | 12 +-
drivers/usb/gadget/udc/net2272.c | 15 +-
drivers/usb/gadget/udc/net2280.c | 97 +-
drivers/usb/gadget/udc/omap_udc.c | 22 +
drivers/usb/gadget/udc/pch_udc.c | 52 +-
drivers/usb/gadget/udc/pxa25x_udc.c | 30 +
drivers/usb/gadget/udc/pxa27x_udc.c | 3 +-
drivers/usb/gadget/udc/pxa27x_udc.h | 40 +-
drivers/usb/gadget/udc/r8a66597-udc.c | 10 +
drivers/usb/gadget/udc/s3c-hsudc.c | 15 +
drivers/usb/gadget/udc/s3c2410_udc.c | 10 +
drivers/usb/gadget/udc/udc-core.c | 90 +
drivers/usb/gadget/udc/udc-xilinx.c | 9 +
drivers/usb/host/Kconfig | 17 +-
drivers/usb/host/bcma-hcd.c | 128 +-
drivers/usb/host/ehci-fsl.c | 53 +-
drivers/usb/host/ehci-fsl.h | 1 +
drivers/usb/host/ehci-hub.c | 7 +
drivers/usb/host/ehci-orion.c | 3 +-
drivers/usb/host/ehci-platform.c | 13 +-
drivers/usb/host/ehci-st.c | 7 +-
drivers/usb/host/ehci.h | 12 +
drivers/usb/host/fsl-mph-dr-of.c | 29 +-
drivers/usb/host/ohci-at91.c | 179 +-
drivers/usb/host/oxu210hp-hcd.c | 7 +-
drivers/usb/host/u132-hcd.c | 35 +-
drivers/usb/host/xhci-dbg.c | 4 +
drivers/usb/host/xhci-pci.c | 1 +
drivers/usb/host/xhci-ring.c | 245 +-
drivers/usb/host/xhci.c | 18 +-
drivers/usb/host/xhci.h | 13 +-
drivers/usb/isp1760/isp1760-udc.c | 15 +-
drivers/usb/misc/ftdi-elan.c | 12 +-
drivers/usb/misc/usbtest.c | 7 +-
drivers/usb/musb/Kconfig | 51 +-
drivers/usb/musb/Makefile | 1 +
drivers/usb/musb/musb_core.c | 17 +-
drivers/usb/musb/musb_cppi41.c | 3 +
drivers/usb/musb/musb_dsps.c | 6 +-
drivers/usb/musb/musb_gadget.c | 87 +-
drivers/usb/musb/omap2430.c | 29 +-
drivers/usb/musb/sunxi.c | 756 ++
drivers/usb/musb/ux500.c | 2 +
drivers/usb/phy/Kconfig | 14 +
drivers/usb/phy/Makefile | 1 +
drivers/usb/phy/phy-generic.c | 6 +-
drivers/usb/phy/phy-isp1301.c | 1 +
drivers/usb/phy/phy-keystone.c | 6 +-
drivers/usb/phy/phy-msm-usb.c | 67 +-
drivers/usb/phy/phy-mxs-usb.c | 6 +-
drivers/usb/phy/phy-omap-otg.c | 22 +-
drivers/usb/phy/phy-qcom-8x16-usb.c | 436 +
drivers/usb/phy/phy-tahvo.c | 27 +-
drivers/usb/renesas_usbhs/common.c | 9 +-
drivers/usb/renesas_usbhs/mod_gadget.c | 68 +
drivers/usb/serial/ftdi_sio_ids.h | 2 +-
drivers/usb/serial/io_ti.c | 279 +-
drivers/usb/serial/mxuport.c | 10 +-
drivers/usb/serial/option.c | 13 +-
drivers/usb/serial/qcserial.c | 96 +-
drivers/usb/serial/symbolserial.c | 18 +-
drivers/usb/serial/ti_usb_3410_5052.c | 2 +
drivers/usb/serial/ti_usb_3410_5052.h | 4 +
drivers/usb/serial/usb_wwan.c | 2 +-
drivers/usb/storage/transport.c | 2 +-
drivers/vhost/net.c | 3 +-
drivers/vhost/scsi.c | 8 +-
drivers/vhost/test.c | 3 +
drivers/vhost/vhost.h | 11 +-
drivers/video/Kconfig | 2 -
drivers/video/backlight/Kconfig | 7 +
drivers/video/backlight/Makefile | 1 +
drivers/video/backlight/lp855x_bl.c | 23 +-
drivers/video/backlight/lp8788_bl.c | 3 +-
drivers/video/backlight/pm8941-wled.c | 427 +
drivers/video/backlight/sky81452-backlight.c | 26 +-
drivers/video/backlight/tosa_bl.c | 1 +
drivers/video/console/Kconfig | 2 +-
drivers/video/console/fbcon.c | 1 +
drivers/video/fbdev/Kconfig | 16 +-
drivers/video/fbdev/Makefile | 1 +
drivers/video/fbdev/arkfb.c | 36 +-
drivers/video/fbdev/atmel_lcdfb.c | 3 +-
drivers/video/fbdev/aty/atyfb.h | 5 +-
drivers/video/fbdev/aty/atyfb_base.c | 109 +-
drivers/video/fbdev/broadsheetfb.c | 8 +-
drivers/video/fbdev/core/fbmon.c | 4 +-
drivers/video/fbdev/core/fbsysfs.c | 2 +-
drivers/video/fbdev/core/modedb.c | 2 +-
drivers/video/fbdev/ep93xx-fb.c | 30 +-
drivers/video/fbdev/fsl-diu-fb.c | 9 +-
drivers/video/fbdev/gxt4500.c | 2 +-
drivers/video/fbdev/hyperv_fb.c | 46 +-
drivers/video/fbdev/i740fb.c | 35 +-
drivers/video/fbdev/kyro/fbdev.c | 33 +-
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 1 +
drivers/video/fbdev/ocfb.c | 1 -
.../video/fbdev/omap2/displays-new/connector-dvi.c | 2 +-
.../fbdev/omap2/displays-new/encoder-opa362.c | 1 -
.../omap2/displays-new/panel-sony-acx565akm.c | 1 +
drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2 +-
drivers/video/fbdev/pxa168fb.c | 14 +-
drivers/video/fbdev/pxafb.c | 1 -
drivers/video/fbdev/s1d13xxxfb.c | 3 +-
drivers/video/fbdev/s3c-fb.c | 2 +-
drivers/video/fbdev/s3fb.c | 35 +-
drivers/video/fbdev/sa1100fb.c | 1 -
drivers/video/fbdev/simplefb.c | 1 +
drivers/video/fbdev/sm712.h | 116 +
drivers/video/fbdev/sm712fb.c | 1653 +++
drivers/video/fbdev/ssd1307fb.c | 6 +-
drivers/video/fbdev/stifb.c | 1 +
drivers/video/fbdev/tridentfb.c | 12 +-
drivers/video/fbdev/udlfb.c | 10 +-
drivers/video/fbdev/vfb.c | 17 +-
drivers/video/fbdev/vt8623fb.c | 31 +-
drivers/video/fbdev/xen-fbfront.c | 20 +-
drivers/video/of_display_timing.c | 1 +
drivers/virtio/virtio_balloon.c | 16 +-
drivers/virtio/virtio_mmio.c | 10 +
drivers/w1/masters/ds2482.c | 1 -
drivers/w1/masters/matrox_w1.c | 16 +-
drivers/watchdog/Kconfig | 26 +-
drivers/watchdog/Makefile | 2 +
drivers/watchdog/at91rm9200_wdt.c | 7 +-
drivers/watchdog/at91sam9_wdt.c | 22 +-
drivers/watchdog/at91sam9_wdt.h | 2 +
drivers/watchdog/bcm2835_wdt.c | 11 +-
drivers/watchdog/bcm47xx_wdt.c | 1 +
drivers/watchdog/bcm_kona_wdt.c | 1 +
drivers/watchdog/booke_wdt.c | 4 +-
drivers/watchdog/coh901327_wdt.c | 1 +
drivers/watchdog/da9052_wdt.c | 1 +
drivers/watchdog/da9055_wdt.c | 1 +
drivers/watchdog/da9062_wdt.c | 1 +
drivers/watchdog/da9063_wdt.c | 1 +
drivers/watchdog/davinci_wdt.c | 1 +
drivers/watchdog/digicolor_wdt.c | 1 +
drivers/watchdog/ep93xx_wdt.c | 1 +
drivers/watchdog/gef_wdt.c | 1 +
drivers/watchdog/gpio_wdt.c | 65 +-
drivers/watchdog/iTCO_wdt.c | 82 +-
drivers/watchdog/ie6xx_wdt.c | 1 +
drivers/watchdog/intel-mid_wdt.c | 1 +
drivers/watchdog/jz4740_wdt.c | 1 +
drivers/watchdog/ks8695_wdt.c | 9 +-
drivers/watchdog/lpc18xx_wdt.c | 340 +
drivers/watchdog/mena21_wdt.c | 2 +
drivers/watchdog/menf21bmc_wdt.c | 1 +
drivers/watchdog/moxart_wdt.c | 1 +
drivers/watchdog/mpc8xxx_wdt.c | 156 +-
drivers/watchdog/mtk_wdt.c | 39 +
drivers/watchdog/nv_tco.c | 2 +
drivers/watchdog/omap_wdt.c | 1 +
drivers/watchdog/orion_wdt.c | 1 +
drivers/watchdog/pnx4008_wdt.c | 1 +
drivers/watchdog/qcom-wdt.c | 1 +
drivers/watchdog/retu_wdt.c | 1 +
drivers/watchdog/rt2880_wdt.c | 1 +
drivers/watchdog/s3c2410_wdt.c | 1 +
drivers/watchdog/sama5d4_wdt.c | 280 +
drivers/watchdog/shwdt.c | 1 +
drivers/watchdog/sirfsoc_wdt.c | 1 +
drivers/watchdog/sp805_wdt.c | 1 +
drivers/watchdog/st_lpc_wdt.c | 1 +
drivers/watchdog/stmp3xxx_rtc_wdt.c | 1 +
drivers/watchdog/tegra_wdt.c | 1 +
drivers/watchdog/ts72xx_wdt.c | 3 +-
drivers/watchdog/twl4030_wdt.c | 1 +
drivers/watchdog/txx9wdt.c | 1 +
drivers/watchdog/ux500_wdt.c | 1 +
drivers/watchdog/via_wdt.c | 1 +
drivers/watchdog/wm831x_wdt.c | 1 +
drivers/watchdog/wm8350_wdt.c | 1 +
drivers/xen/Kconfig | 11 +
drivers/xen/balloon.c | 8 +-
drivers/xen/biomerge.c | 6 +-
drivers/xen/events/events_base.c | 17 +-
drivers/xen/events/events_fifo.c | 4 +-
drivers/xen/gntalloc.c | 5 +-
drivers/xen/gntdev.c | 2 +-
drivers/xen/manage.c | 2 +-
drivers/xen/privcmd.c | 48 +-
drivers/xen/swiotlb-xen.c | 22 +-
drivers/xen/sys-hypervisor.c | 136 +-
drivers/xen/tmem.c | 24 +-
drivers/xen/xen-acpi-processor.c | 16 +-
drivers/xen/xenbus/xenbus_client.c | 2 +-
drivers/xen/xenbus/xenbus_dev_backend.c | 2 +-
drivers/xen/xenbus/xenbus_probe.c | 16 +-
drivers/xen/xenfs/Makefile | 1 +
drivers/xen/xenfs/super.c | 3 +
drivers/xen/xenfs/xenfs.h | 1 +
drivers/xen/xenfs/xensyms.c | 152 +
drivers/xen/xlate_mmu.c | 18 +-
5414 files changed, 460417 insertions(+), 220214 deletions(-)
create mode 100644 drivers/acpi/acpica/dsdebug.c
create mode 100644 drivers/acpi/acpica/utnonansi.c
create mode 100644 drivers/acpi/device_sysfs.c
create mode 100644 drivers/base/platform-msi.c
create mode 100644 drivers/bluetooth/btqca.c
create mode 100644 drivers/bluetooth/btqca.h
create mode 100644 drivers/bluetooth/hci_qca.c
delete mode 100644 drivers/clk/clk-gpio-gate.c
create mode 100644 drivers/clk/clk-gpio.c
create mode 100644 drivers/clk/hisilicon/clk-hi6220-stub.c
create mode 100644 drivers/clk/imx/clk-imx6ul.c
create mode 100644 drivers/clk/rockchip/clk-inverter.c
create mode 100644 drivers/clk/rockchip/clk-rk3368.c
create mode 100644 drivers/clk/sunxi/clk-simple-gates.c
create mode 100644 drivers/clk/tegra/clk-dfll.c
create mode 100644 drivers/clk/tegra/clk-dfll.h
create mode 100644 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
create mode 100644 drivers/clk/tegra/cvb.c
create mode 100644 drivers/clk/tegra/cvb.h
create mode 100644 drivers/clk/ti/clk-814x.c
create mode 100644 drivers/clk/ti/clkt_dflt.c
create mode 100644 drivers/clk/ti/clkt_dpll.c
create mode 100644 drivers/clk/ti/clkt_iclk.c
create mode 100644 drivers/clk/ti/dpll3xxx.c
create mode 100644 drivers/clk/ti/dpll44xx.c
delete mode 100644 drivers/clk/ux500/u8500_clk.c
delete mode 100644 drivers/clk/zte/clk-pll.c
create mode 100644 drivers/clk/zte/clk.c
create mode 100644 drivers/clocksource/clksrc_st_lpc.c
create mode 100644 drivers/clocksource/time-pistachio.c
delete mode 100644 drivers/cpufreq/exynos-cpufreq.c
delete mode 100644 drivers/cpufreq/exynos-cpufreq.h
delete mode 100644 drivers/cpufreq/exynos4x12-cpufreq.c
delete mode 100644 drivers/cpufreq/exynos5250-cpufreq.c
create mode 100644 drivers/cpufreq/mt8173-cpufreq.c
delete mode 100644 drivers/cpufreq/tegra-cpufreq.c
create mode 100644 drivers/cpufreq/tegra124-cpufreq.c
create mode 100644 drivers/cpufreq/tegra20-cpufreq.c
delete mode 100644 drivers/crypto/nx/nx-842-crypto.c
delete mode 100644 drivers/crypto/nx/nx-842-platform.c
create mode 100644 drivers/crypto/qat/qat_common/.gitignore
create mode 100644 drivers/crypto/qat/qat_common/adf_admin.c
create mode 100644 drivers/crypto/qat/qat_common/adf_hw_arbiter.c
create mode 100644 drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
create mode 100644 drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
create mode 100644 drivers/crypto/qat/qat_common/adf_sriov.c
create mode 100644 drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
create mode 100644 drivers/crypto/qat/qat_common/qat_asym_algs.c
create mode 100644 drivers/crypto/qat/qat_common/qat_rsakey.asn1
delete mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_admin.c
delete mode 100644 drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
delete mode 100644 drivers/crypto/qat/qat_dh895xcc/qat_admin.c
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/Makefile
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
create mode 100644 drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
create mode 100644 drivers/crypto/sunxi-ss/Makefile
create mode 100644 drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
create mode 100644 drivers/crypto/sunxi-ss/sun4i-ss-core.c
create mode 100644 drivers/crypto/sunxi-ss/sun4i-ss-hash.c
create mode 100644 drivers/crypto/sunxi-ss/sun4i-ss.h
create mode 100644 drivers/dma/dma-axi-dmac.c
create mode 100644 drivers/dma/idma64.c
create mode 100644 drivers/dma/idma64.h
delete mode 100644 drivers/dma/ioat/dma_v2.c
delete mode 100644 drivers/dma/ioat/dma_v2.h
delete mode 100644 drivers/dma/ioat/dma_v3.c
create mode 100644 drivers/dma/ioat/init.c
delete mode 100644 drivers/dma/ioat/pci.c
create mode 100644 drivers/dma/ioat/prep.c
create mode 100644 drivers/dma/ioat/sysfs.c
create mode 100644 drivers/dma/lpc18xx-dmamux.c
create mode 100644 drivers/dma/sun4i-dma.c
create mode 100644 drivers/dma/zx296702_dma.c
delete mode 100644 drivers/edac/mce_amd_inj.c
create mode 100644 drivers/firmware/psci.c
create mode 100644 drivers/firmware/qcom_scm-64.c
create mode 100644 drivers/gpio/gpio-ath79.c
create mode 100644 drivers/gpio/gpio-zx.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
delete mode 100644 drivers/gpu/drm/amd/amdgpu/atom-bits.h
delete mode 100644 drivers/gpu/drm/amd/amdgpu/atom-names.h
delete mode 100644 drivers/gpu/drm/amd/amdgpu/atom-types.h
delete mode 100644 drivers/gpu/drm/amd/amdgpu/atombios.h
create mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
create mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_smc.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
delete mode 100644 drivers/gpu/drm/amd/amdgpu/pptable.h
create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
create mode 100644 drivers/gpu/drm/amd/include/atom-bits.h
create mode 100644 drivers/gpu/drm/amd/include/atom-names.h
create mode 100644 drivers/gpu/drm/amd/include/atom-types.h
create mode 100644 drivers/gpu/drm/amd/include/atombios.h
create mode 100644 drivers/gpu/drm/amd/include/cgs_common.h
create mode 100644 drivers/gpu/drm/amd/include/cgs_linux.h
create mode 100644 drivers/gpu/drm/amd/include/pptable.h
create mode 100644 drivers/gpu/drm/amd/include/vi_structs.h
create mode 100644 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
create mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
create mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
create mode 100644 drivers/gpu/drm/amd/scheduler/sched_fence.c
create mode 100644 drivers/gpu/drm/bridge/nxp-ptn3460.c
create mode 100644 drivers/gpu/drm/bridge/parade-ps8622.c
delete mode 100644 drivers/gpu/drm/bridge/ps8622.c
delete mode 100644 drivers/gpu/drm/bridge/ptn3460.c
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_buf.c
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_buf.h
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_encoder.c
delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_encoder.h
create mode 100644 drivers/gpu/drm/fsl-dcu/Kconfig
create mode 100644 drivers/gpu/drm/fsl-dcu/Makefile
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
create mode 100644 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
create mode 100644 drivers/gpu/drm/i915/i915_gem_fence.c
create mode 100644 drivers/gpu/drm/i915/i915_guc_reg.h
create mode 100644 drivers/gpu/drm/i915/intel_guc_fwif.h
create mode 100644 drivers/gpu/drm/i915/intel_hotplug.c
create mode 100644 drivers/gpu/drm/i915/intel_mocs.c
create mode 100644 drivers/gpu/drm/i915/intel_mocs.h
create mode 100644 drivers/gpu/drm/msm/dsi/dsi_cfg.c
create mode 100644 drivers/gpu/drm/msm/dsi/dsi_cfg.h
delete mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy.c
create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
delete mode 100644 drivers/gpu/drm/nouveau/nouveau_agp.c
delete mode 100644 drivers/gpu/drm/nouveau/nouveau_agp.h
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/core/engctx.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/core/handle.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/core/memory.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/core/namedb.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/core/parent.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/core/printk.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
create mode 100644
drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c create 
mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c 
create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h create mode 100644 
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h delete mode 100644 drivers/gpu/drm/panel/panel-ld9040.c create mode 100644 drivers/gpu/drm/panel/panel-lg-lg4573.c delete mode 100644 drivers/gpu/drm/panel/panel-s6e8aa0.c create mode 100644 drivers/gpu/drm/panel/panel-samsung-ld9040.c create mode 100644 drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c create mode 100644 drivers/gpu/drm/sti/sti_crtc.c create mode 100644 
drivers/gpu/drm/sti/sti_crtc.h delete mode 100644 drivers/gpu/drm/sti/sti_drm_crtc.c delete mode 100644 drivers/gpu/drm/sti/sti_drm_crtc.h delete mode 100644 drivers/gpu/drm/sti/sti_drm_drv.c delete mode 100644 drivers/gpu/drm/sti/sti_drm_drv.h delete mode 100644 drivers/gpu/drm/sti/sti_drm_plane.c delete mode 100644 drivers/gpu/drm/sti/sti_drm_plane.h create mode 100644 drivers/gpu/drm/sti/sti_drv.c create mode 100644 drivers/gpu/drm/sti/sti_drv.h delete mode 100644 drivers/gpu/drm/sti/sti_hqvdp.h delete mode 100644 drivers/gpu/drm/sti/sti_layer.c delete mode 100644 drivers/gpu/drm/sti/sti_layer.h create mode 100644 drivers/gpu/drm/sti/sti_plane.c create mode 100644 drivers/gpu/drm/sti/sti_plane.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/includeCheck.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga_escape.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga_reg.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/svga_types.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h create mode 100644 drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga3d_reg.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga_escape.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga_overlay.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga_reg.h delete mode 100644 drivers/gpu/drm/vmwgfx/svga_types.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_so.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_so.h create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c create mode 100644 drivers/hid/hid-gembird.c create mode 100644 drivers/hwmon/pmbus/max20751.c create mode 100644 drivers/i2c/busses/i2c-emev2.c create mode 100644 drivers/i2c/busses/i2c-lpc2k.c create mode 100644 drivers/i2c/muxes/i2c-mux-reg.c create mode 100644 drivers/iio/light/opt3001.c create mode 100644 drivers/iio/light/pa12203001.c create mode 100644 drivers/iio/light/rpr0521.c create mode 100644 drivers/infiniband/core/roce_gid_mgmt.c delete mode 100644 drivers/infiniband/hw/amso1100/Kbuild delete mode 100644 drivers/infiniband/hw/amso1100/Kconfig delete mode 100644 drivers/infiniband/hw/amso1100/c2.c delete mode 100644 drivers/infiniband/hw/amso1100/c2.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_ae.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_ae.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_alloc.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_cm.c delete mode 100644 
drivers/infiniband/hw/amso1100/c2_cq.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_intr.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_mm.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_mq.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_mq.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_pd.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_provider.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_provider.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_qp.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_rnic.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_status.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_user.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_vq.c delete mode 100644 drivers/infiniband/hw/amso1100/c2_vq.h delete mode 100644 drivers/infiniband/hw/amso1100/c2_wr.h delete mode 100644 drivers/infiniband/hw/ehca/Kconfig delete mode 100644 drivers/infiniband/hw/ehca/Makefile delete mode 100644 drivers/infiniband/hw/ehca/ehca_av.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_classes.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_classes_pSeries.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_cq.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_eq.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_hca.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_irq.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_irq.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_iverbs.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_main.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_mcast.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_mrmw.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_mrmw.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_pd.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_qes.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_qp.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_reqs.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_sqp.c delete mode 100644 drivers/infiniband/hw/ehca/ehca_tools.h delete mode 100644 drivers/infiniband/hw/ehca/ehca_uverbs.c delete mode 100644 drivers/infiniband/hw/ehca/hcp_if.c delete mode 100644 drivers/infiniband/hw/ehca/hcp_if.h delete mode 100644 drivers/infiniband/hw/ehca/hcp_phyp.c delete mode 100644 drivers/infiniband/hw/ehca/hcp_phyp.h delete mode 100644 drivers/infiniband/hw/ehca/hipz_fns.h delete mode 100644 drivers/infiniband/hw/ehca/hipz_fns_core.h delete mode 100644 drivers/infiniband/hw/ehca/hipz_hw.h delete mode 100644 drivers/infiniband/hw/ehca/ipz_pt_fn.c delete mode 100644 drivers/infiniband/hw/ehca/ipz_pt_fn.h delete mode 100644 drivers/infiniband/hw/ipath/Kconfig delete mode 100644 drivers/infiniband/hw/ipath/Makefile delete mode 100644 drivers/infiniband/hw/ipath/ipath_common.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_cq.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_debug.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_diag.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_dma.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_driver.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_eeprom.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_file_ops.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_fs.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_iba6110.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_init_chip.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_intr.c delete mode 100644 
drivers/infiniband/hw/ipath/ipath_kernel.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_keys.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_mad.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_mmap.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_mr.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_qp.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_rc.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_registers.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_ruc.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_sdma.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_srq.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_stats.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_sysfs.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_uc.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_ud.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_user_pages.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_user_sdma.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_user_sdma.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_verbs.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_verbs.h delete mode 100644 drivers/infiniband/hw/ipath/ipath_verbs_mcast.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_wc_ppc64.c delete mode 100644 drivers/infiniband/hw/ipath/ipath_wc_x86_64.c create mode 100644 drivers/input/keyboard/snvs_pwrkey.c delete mode 100644 drivers/input/misc/max77843-haptic.c create mode 100644 drivers/input/mouse/cyapa_gen6.c create mode 100644 drivers/input/touchscreen/colibri-vf50-ts.c create mode 100644 drivers/input/touchscreen/imx6ul_tsc.c create mode 100644 drivers/irqchip/irq-bcm2836.c create mode 100644 drivers/irqchip/irq-gic-v3-its-pci-msi.c create mode 100644 drivers/irqchip/irq-gic-v3-its-platform-msi.c create mode 100644 drivers/irqchip/irq-i8259.c create mode 100644 drivers/irqchip/irq-imx-gpcv2.c delete mode 100644 drivers/irqchip/irqchip.h delete mode 100644 drivers/leds/leds-pm8941-wled.c create mode 100644 drivers/leds/leds-powernv.c create mode 100644 drivers/media/dvb-frontends/ascot2e.c create mode 100644 drivers/media/dvb-frontends/ascot2e.h create mode 100644 drivers/media/dvb-frontends/cxd2841er.c create mode 100644 drivers/media/dvb-frontends/cxd2841er.h create mode 100644 drivers/media/dvb-frontends/cxd2841er_priv.h create mode 100644 drivers/media/dvb-frontends/horus3a.c create mode 100644 drivers/media/dvb-frontends/horus3a.h create mode 100644 drivers/media/dvb-frontends/lnbh25.c create mode 100644 drivers/media/dvb-frontends/lnbh25.h create mode 100644 drivers/media/i2c/tc358743.c create mode 100644 drivers/media/i2c/tc358743_regs.h create mode 100644 drivers/media/pci/netup_unidvb/Kconfig create mode 100644 drivers/media/pci/netup_unidvb/Makefile create mode 100644 drivers/media/pci/netup_unidvb/netup_unidvb.h create mode 100644 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c create mode 100644 drivers/media/pci/netup_unidvb/netup_unidvb_core.c create mode 100644 drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c create mode 100644 drivers/media/pci/netup_unidvb/netup_unidvb_spi.c create mode 100644 drivers/media/pci/smipcie/smipcie-ir.c create mode 100644 drivers/media/pci/smipcie/smipcie-main.c delete mode 100644 drivers/media/pci/smipcie/smipcie.c create mode 100644 drivers/media/platform/coda/coda-gdi.c create mode 100644 drivers/media/platform/omap3isp/omap3isp.h create mode 100644 drivers/media/platform/rcar_jpu.c create mode 100644 
drivers/media/platform/sti/c8sectpfe/Kconfig create mode 100644 drivers/media/platform/sti/c8sectpfe/Makefile create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.h create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c create mode 100644 drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.h create mode 100644 drivers/media/v4l2-core/v4l2-trace.c create mode 100644 drivers/memory/pl172.c create mode 100644 drivers/memory/tegra/tegra210.c create mode 100644 drivers/mfd/da9062-core.c create mode 100644 drivers/mfd/intel-lpss-acpi.c create mode 100644 drivers/mfd/intel-lpss-pci.c create mode 100644 drivers/mfd/intel-lpss.c create mode 100644 drivers/mfd/intel-lpss.h create mode 100644 drivers/mfd/wm8998-tables.c delete mode 100644 drivers/misc/eeprom/sunxi_sid.c create mode 100644 drivers/misc/mei/bus-fixup.c delete mode 100644 drivers/misc/mei/nfc.c create mode 100644 drivers/misc/qcom-coincell.c create mode 100644 drivers/mmc/host/sdhci-of-at91.c create mode 100644 drivers/mtd/spi-nor/nxp-spifi.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/Kconfig create mode 100644 drivers/net/ethernet/mellanox/mlxsw/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlxsw/cmd.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/emad.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/item.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/pci.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/pci.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/port.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/reg.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/switchx2.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/trap.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/txheader.h create mode 100644 drivers/net/ethernet/synopsys/Kconfig create mode 100644 drivers/net/ethernet/synopsys/Makefile create mode 100644 drivers/net/ethernet/synopsys/dwc_eth_qos.c create mode 100644 drivers/net/fjes/Makefile create mode 100644 drivers/net/fjes/fjes.h create mode 100644 drivers/net/fjes/fjes_ethtool.c create mode 100644 drivers/net/fjes/fjes_hw.c create mode 100644 drivers/net/fjes/fjes_hw.h create mode 100644 drivers/net/fjes/fjes_main.c create mode 100644 drivers/net/fjes/fjes_regs.h create mode 100644 drivers/net/phy/aquantia.c create mode 100644 drivers/net/phy/dp83848.c create mode 100644 drivers/net/phy/microchip.c create mode 100644 drivers/net/phy/teranetics.c create mode 100644 drivers/net/usb/ch9200.c create mode 100644 drivers/net/usb/lan78xx.c create mode 100644 drivers/net/usb/lan78xx.h create mode 100644 drivers/net/vrf.c create mode 100644 drivers/net/wireless/ath/ath10k/swap.c create mode 100644 drivers/net/wireless/ath/ath10k/swap.h create mode 100644 drivers/net/wireless/ath/wil6210/boot_loader.h create mode 100644 drivers/net/wireless/ath/wil6210/pm.c create mode 100644 drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h create mode 100644 drivers/net/wireless/iwlwifi/mvm/tof.c create mode 100644 
drivers/net/wireless/iwlwifi/mvm/tof.h create mode 100644 drivers/nfc/s3fwrn5/Kconfig create mode 100644 drivers/nfc/s3fwrn5/Makefile create mode 100644 drivers/nfc/s3fwrn5/core.c create mode 100644 drivers/nfc/s3fwrn5/firmware.c create mode 100644 drivers/nfc/s3fwrn5/firmware.h create mode 100644 drivers/nfc/s3fwrn5/i2c.c create mode 100644 drivers/nfc/s3fwrn5/nci.c create mode 100644 drivers/nfc/s3fwrn5/nci.h create mode 100644 drivers/nfc/s3fwrn5/s3fwrn5.h create mode 100644 drivers/nfc/st-nci/spi.c create mode 100644 drivers/nvdimm/claim.c create mode 100644 drivers/nvdimm/e820.c create mode 100644 drivers/nvdimm/pfn.h create mode 100644 drivers/nvdimm/pfn_devs.c create mode 100644 drivers/nvmem/Kconfig create mode 100644 drivers/nvmem/Makefile create mode 100644 drivers/nvmem/core.c create mode 100644 drivers/nvmem/qfprom.c create mode 100644 drivers/nvmem/sunxi_sid.c create mode 100644 drivers/perf/Kconfig create mode 100644 drivers/perf/Makefile create mode 100644 drivers/perf/arm_pmu.c create mode 100644 drivers/phy/phy-lpc18xx-usb-otg.c create mode 100644 drivers/pinctrl/freescale/pinctrl-imx6ul.c create mode 100644 drivers/pinctrl/pinctrl-digicolor.c create mode 100644 drivers/pinctrl/qcom/pinctrl-qdf2xxx.c create mode 100644 drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c create mode 100644 drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c create mode 100644 drivers/pinctrl/uniphier/Kconfig create mode 100644 drivers/pinctrl/uniphier/Makefile create mode 100644 drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-proxstream2.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-uniphier-core.c create mode 100644 drivers/pinctrl/uniphier/pinctrl-uniphier.h create mode 100644 drivers/platform/x86/surfacepro3_button.c create mode 100644 drivers/power/reset/zx-reboot.c create mode 100644 drivers/pwm/pwm-crc.c create mode 100644 drivers/pwm/pwm-lpc18xx-sct.c delete mode 100644 drivers/regulator/max77843.c create mode 100644 drivers/regulator/mt6311-regulator.c create mode 100644 drivers/regulator/mt6311-regulator.h create mode 100644 drivers/regulator/qcom_smd-regulator.c create mode 100644 drivers/reset/reset-ath79.c create mode 100644 drivers/reset/reset-lpc18xx.c create mode 100644 drivers/reset/reset-zynq.c create mode 100644 drivers/rtc/rtc-lpc24xx.c create mode 100644 drivers/rtc/rtc-sa1100.h create mode 100644 drivers/rtc/rtc-zynqmp.c create mode 100644 drivers/scsi/cxlflash/Kconfig create mode 100644 drivers/scsi/cxlflash/Makefile create mode 100644 drivers/scsi/cxlflash/common.h create mode 100644 drivers/scsi/cxlflash/lunmgt.c create mode 100644 drivers/scsi/cxlflash/main.c create mode 100644 drivers/scsi/cxlflash/main.h create mode 100644 drivers/scsi/cxlflash/sislite.h create mode 100644 drivers/scsi/cxlflash/superpipe.c create mode 100644 drivers/scsi/cxlflash/superpipe.h create mode 100644 drivers/scsi/cxlflash/vlun.c create mode 100644 drivers/scsi/cxlflash/vlun.h delete mode 100644 drivers/scsi/device_handler/scsi_dh.c create mode 100644 drivers/scsi/scsi_dh.c create mode 100644 drivers/soc/dove/Makefile create mode 100644 drivers/soc/dove/pmu.c create mode 100644 drivers/soc/mediatek/mtk-infracfg.c create mode 100644 drivers/soc/mediatek/mtk-scpsys.c create mode 100644 drivers/soc/qcom/smd-rpm.c 
create mode 100644 drivers/soc/qcom/smd.c create mode 100644 drivers/soc/qcom/smem.c create mode 100644 drivers/soc/tegra/fuse/speedo-tegra210.c create mode 100644 drivers/spi/spi-mt65xx.c create mode 100644 drivers/spi/spi-xlp.c create mode 100644 drivers/staging/fbtft/fb_uc1611.c create mode 100644 drivers/staging/fsl-mc/README.txt create mode 100644 drivers/staging/most/Documentation/ABI/sysfs-class-most.txt create mode 100644 drivers/staging/most/Documentation/driver_usage.txt create mode 100644 drivers/staging/most/Kconfig create mode 100644 drivers/staging/most/Makefile create mode 100644 drivers/staging/most/TODO create mode 100644 drivers/staging/most/aim-cdev/Kconfig create mode 100644 drivers/staging/most/aim-cdev/Makefile create mode 100644 drivers/staging/most/aim-cdev/cdev.c create mode 100644 drivers/staging/most/aim-network/Kconfig create mode 100644 drivers/staging/most/aim-network/Makefile create mode 100644 drivers/staging/most/aim-network/networking.c create mode 100644 drivers/staging/most/aim-network/networking.h create mode 100644 drivers/staging/most/aim-sound/Kconfig create mode 100644 drivers/staging/most/aim-sound/Makefile create mode 100644 drivers/staging/most/aim-sound/sound.c create mode 100644 drivers/staging/most/aim-v4l2/Kconfig create mode 100644 drivers/staging/most/aim-v4l2/Makefile create mode 100644 drivers/staging/most/aim-v4l2/video.c create mode 100644 drivers/staging/most/hdm-dim2/Kconfig create mode 100644 drivers/staging/most/hdm-dim2/Makefile create mode 100644 drivers/staging/most/hdm-dim2/dim2_errors.h create mode 100644 drivers/staging/most/hdm-dim2/dim2_hal.c create mode 100644 drivers/staging/most/hdm-dim2/dim2_hal.h create mode 100644 drivers/staging/most/hdm-dim2/dim2_hdm.c create mode 100644 drivers/staging/most/hdm-dim2/dim2_hdm.h create mode 100644 drivers/staging/most/hdm-dim2/dim2_reg.h create mode 100644 drivers/staging/most/hdm-dim2/dim2_sysfs.c create mode 100644 drivers/staging/most/hdm-dim2/dim2_sysfs.h create mode 100644 drivers/staging/most/hdm-i2c/Kconfig create mode 100644 drivers/staging/most/hdm-i2c/Makefile create mode 100644 drivers/staging/most/hdm-i2c/hdm_i2c.c create mode 100644 drivers/staging/most/hdm-usb/Kconfig create mode 100644 drivers/staging/most/hdm-usb/Makefile create mode 100644 drivers/staging/most/hdm-usb/hdm_usb.c create mode 100644 drivers/staging/most/mostcore/Kconfig create mode 100644 drivers/staging/most/mostcore/Makefile create mode 100644 drivers/staging/most/mostcore/core.c create mode 100644 drivers/staging/most/mostcore/mostcore.h delete mode 100644 drivers/staging/ozwpan/Kconfig delete mode 100644 drivers/staging/ozwpan/Makefile delete mode 100644 drivers/staging/ozwpan/README delete mode 100644 drivers/staging/ozwpan/TODO delete mode 100644 drivers/staging/ozwpan/ozappif.h delete mode 100644 drivers/staging/ozwpan/ozcdev.c delete mode 100644 drivers/staging/ozwpan/ozcdev.h delete mode 100644 drivers/staging/ozwpan/ozdbg.h delete mode 100644 drivers/staging/ozwpan/ozeltbuf.c delete mode 100644 drivers/staging/ozwpan/ozeltbuf.h delete mode 100644 drivers/staging/ozwpan/ozhcd.c delete mode 100644 drivers/staging/ozwpan/ozhcd.h delete mode 100644 drivers/staging/ozwpan/ozmain.c delete mode 100644 drivers/staging/ozwpan/ozpd.c delete mode 100644 drivers/staging/ozwpan/ozpd.h delete mode 100644 drivers/staging/ozwpan/ozproto.c delete mode 100644 drivers/staging/ozwpan/ozproto.h delete mode 100644 drivers/staging/ozwpan/ozprotocol.h delete mode 100644 drivers/staging/ozwpan/ozurbparanoia.c delete 
mode 100644 drivers/staging/ozwpan/ozurbparanoia.h delete mode 100644 drivers/staging/ozwpan/ozusbif.h delete mode 100644 drivers/staging/ozwpan/ozusbsvc.c delete mode 100644 drivers/staging/ozwpan/ozusbsvc.h delete mode 100644 drivers/staging/ozwpan/ozusbsvc1.c create mode 100644 drivers/staging/rdma/Kconfig create mode 100644 drivers/staging/rdma/Makefile create mode 100644 drivers/staging/rdma/amso1100/Kbuild create mode 100644 drivers/staging/rdma/amso1100/Kconfig create mode 100644 drivers/staging/rdma/amso1100/TODO create mode 100644 drivers/staging/rdma/amso1100/c2.c create mode 100644 drivers/staging/rdma/amso1100/c2.h create mode 100644 drivers/staging/rdma/amso1100/c2_ae.c create mode 100644 drivers/staging/rdma/amso1100/c2_ae.h create mode 100644 drivers/staging/rdma/amso1100/c2_alloc.c create mode 100644 drivers/staging/rdma/amso1100/c2_cm.c create mode 100644 drivers/staging/rdma/amso1100/c2_cq.c create mode 100644 drivers/staging/rdma/amso1100/c2_intr.c create mode 100644 drivers/staging/rdma/amso1100/c2_mm.c create mode 100644 drivers/staging/rdma/amso1100/c2_mq.c create mode 100644 drivers/staging/rdma/amso1100/c2_mq.h create mode 100644 drivers/staging/rdma/amso1100/c2_pd.c create mode 100644 drivers/staging/rdma/amso1100/c2_provider.c create mode 100644 drivers/staging/rdma/amso1100/c2_provider.h create mode 100644 drivers/staging/rdma/amso1100/c2_qp.c create mode 100644 drivers/staging/rdma/amso1100/c2_rnic.c create mode 100644 drivers/staging/rdma/amso1100/c2_status.h create mode 100644 drivers/staging/rdma/amso1100/c2_user.h create mode 100644 drivers/staging/rdma/amso1100/c2_vq.c create mode 100644 drivers/staging/rdma/amso1100/c2_vq.h create mode 100644 drivers/staging/rdma/amso1100/c2_wr.h create mode 100644 drivers/staging/rdma/ehca/Kconfig create mode 100644 drivers/staging/rdma/ehca/Makefile create mode 100644 drivers/staging/rdma/ehca/TODO create mode 100644 drivers/staging/rdma/ehca/ehca_av.c create mode 100644 drivers/staging/rdma/ehca/ehca_classes.h create mode 100644 drivers/staging/rdma/ehca/ehca_classes_pSeries.h create mode 100644 drivers/staging/rdma/ehca/ehca_cq.c create mode 100644 drivers/staging/rdma/ehca/ehca_eq.c create mode 100644 drivers/staging/rdma/ehca/ehca_hca.c create mode 100644 drivers/staging/rdma/ehca/ehca_irq.c create mode 100644 drivers/staging/rdma/ehca/ehca_irq.h create mode 100644 drivers/staging/rdma/ehca/ehca_iverbs.h create mode 100644 drivers/staging/rdma/ehca/ehca_main.c create mode 100644 drivers/staging/rdma/ehca/ehca_mcast.c create mode 100644 drivers/staging/rdma/ehca/ehca_mrmw.c create mode 100644 drivers/staging/rdma/ehca/ehca_mrmw.h create mode 100644 drivers/staging/rdma/ehca/ehca_pd.c create mode 100644 drivers/staging/rdma/ehca/ehca_qes.h create mode 100644 drivers/staging/rdma/ehca/ehca_qp.c create mode 100644 drivers/staging/rdma/ehca/ehca_reqs.c create mode 100644 drivers/staging/rdma/ehca/ehca_sqp.c create mode 100644 drivers/staging/rdma/ehca/ehca_tools.h create mode 100644 drivers/staging/rdma/ehca/ehca_uverbs.c create mode 100644 drivers/staging/rdma/ehca/hcp_if.c create mode 100644 drivers/staging/rdma/ehca/hcp_if.h create mode 100644 drivers/staging/rdma/ehca/hcp_phyp.c create mode 100644 drivers/staging/rdma/ehca/hcp_phyp.h create mode 100644 drivers/staging/rdma/ehca/hipz_fns.h create mode 100644 drivers/staging/rdma/ehca/hipz_fns_core.h create mode 100644 drivers/staging/rdma/ehca/hipz_hw.h create mode 100644 drivers/staging/rdma/ehca/ipz_pt_fn.c create mode 100644 drivers/staging/rdma/ehca/ipz_pt_fn.h 
create mode 100644 drivers/staging/rdma/hfi1/Kconfig create mode 100644 drivers/staging/rdma/hfi1/Makefile create mode 100644 drivers/staging/rdma/hfi1/TODO create mode 100644 drivers/staging/rdma/hfi1/chip.c create mode 100644 drivers/staging/rdma/hfi1/chip.h create mode 100644 drivers/staging/rdma/hfi1/chip_registers.h create mode 100644 drivers/staging/rdma/hfi1/common.h create mode 100644 drivers/staging/rdma/hfi1/cq.c create mode 100644 drivers/staging/rdma/hfi1/debugfs.c create mode 100644 drivers/staging/rdma/hfi1/debugfs.h create mode 100644 drivers/staging/rdma/hfi1/device.c create mode 100644 drivers/staging/rdma/hfi1/device.h create mode 100644 drivers/staging/rdma/hfi1/diag.c create mode 100644 drivers/staging/rdma/hfi1/dma.c create mode 100644 drivers/staging/rdma/hfi1/driver.c create mode 100644 drivers/staging/rdma/hfi1/eprom.c create mode 100644 drivers/staging/rdma/hfi1/eprom.h create mode 100644 drivers/staging/rdma/hfi1/file_ops.c create mode 100644 drivers/staging/rdma/hfi1/firmware.c create mode 100644 drivers/staging/rdma/hfi1/hfi.h create mode 100644 drivers/staging/rdma/hfi1/init.c create mode 100644 drivers/staging/rdma/hfi1/intr.c create mode 100644 drivers/staging/rdma/hfi1/iowait.h create mode 100644 drivers/staging/rdma/hfi1/keys.c create mode 100644 drivers/staging/rdma/hfi1/mad.c create mode 100644 drivers/staging/rdma/hfi1/mad.h create mode 100644 drivers/staging/rdma/hfi1/mmap.c create mode 100644 drivers/staging/rdma/hfi1/mr.c create mode 100644 drivers/staging/rdma/hfi1/opa_compat.h create mode 100644 drivers/staging/rdma/hfi1/pcie.c create mode 100644 drivers/staging/rdma/hfi1/pio.c create mode 100644 drivers/staging/rdma/hfi1/pio.h create mode 100644 drivers/staging/rdma/hfi1/pio_copy.c create mode 100644 drivers/staging/rdma/hfi1/platform_config.h create mode 100644 drivers/staging/rdma/hfi1/qp.c create mode 100644 drivers/staging/rdma/hfi1/qp.h create mode 100644 drivers/staging/rdma/hfi1/qsfp.c create mode 100644 drivers/staging/rdma/hfi1/qsfp.h create mode 100644 drivers/staging/rdma/hfi1/rc.c create mode 100644 drivers/staging/rdma/hfi1/ruc.c create mode 100644 drivers/staging/rdma/hfi1/sdma.c create mode 100644 drivers/staging/rdma/hfi1/sdma.h create mode 100644 drivers/staging/rdma/hfi1/srq.c create mode 100644 drivers/staging/rdma/hfi1/sysfs.c create mode 100644 drivers/staging/rdma/hfi1/trace.c create mode 100644 drivers/staging/rdma/hfi1/trace.h create mode 100644 drivers/staging/rdma/hfi1/twsi.c create mode 100644 drivers/staging/rdma/hfi1/twsi.h create mode 100644 drivers/staging/rdma/hfi1/uc.c create mode 100644 drivers/staging/rdma/hfi1/ud.c create mode 100644 drivers/staging/rdma/hfi1/user_pages.c create mode 100644 drivers/staging/rdma/hfi1/user_sdma.c create mode 100644 drivers/staging/rdma/hfi1/user_sdma.h create mode 100644 drivers/staging/rdma/hfi1/verbs.c create mode 100644 drivers/staging/rdma/hfi1/verbs.h create mode 100644 drivers/staging/rdma/hfi1/verbs_mcast.c create mode 100644 drivers/staging/rdma/ipath/Kconfig create mode 100644 drivers/staging/rdma/ipath/Makefile create mode 100644 drivers/staging/rdma/ipath/TODO create mode 100644 drivers/staging/rdma/ipath/ipath_common.h create mode 100644 drivers/staging/rdma/ipath/ipath_cq.c create mode 100644 drivers/staging/rdma/ipath/ipath_debug.h create mode 100644 drivers/staging/rdma/ipath/ipath_diag.c create mode 100644 drivers/staging/rdma/ipath/ipath_dma.c create mode 100644 drivers/staging/rdma/ipath/ipath_driver.c create mode 100644 drivers/staging/rdma/ipath/ipath_eeprom.c 
create mode 100644 drivers/staging/rdma/ipath/ipath_file_ops.c create mode 100644 drivers/staging/rdma/ipath/ipath_fs.c create mode 100644 drivers/staging/rdma/ipath/ipath_iba6110.c create mode 100644 drivers/staging/rdma/ipath/ipath_init_chip.c create mode 100644 drivers/staging/rdma/ipath/ipath_intr.c create mode 100644 drivers/staging/rdma/ipath/ipath_kernel.h create mode 100644 drivers/staging/rdma/ipath/ipath_keys.c create mode 100644 drivers/staging/rdma/ipath/ipath_mad.c create mode 100644 drivers/staging/rdma/ipath/ipath_mmap.c create mode 100644 drivers/staging/rdma/ipath/ipath_mr.c create mode 100644 drivers/staging/rdma/ipath/ipath_qp.c create mode 100644 drivers/staging/rdma/ipath/ipath_rc.c create mode 100644 drivers/staging/rdma/ipath/ipath_registers.h create mode 100644 drivers/staging/rdma/ipath/ipath_ruc.c create mode 100644 drivers/staging/rdma/ipath/ipath_sdma.c create mode 100644 drivers/staging/rdma/ipath/ipath_srq.c create mode 100644 drivers/staging/rdma/ipath/ipath_stats.c create mode 100644 drivers/staging/rdma/ipath/ipath_sysfs.c create mode 100644 drivers/staging/rdma/ipath/ipath_uc.c create mode 100644 drivers/staging/rdma/ipath/ipath_ud.c create mode 100644 drivers/staging/rdma/ipath/ipath_user_pages.c create mode 100644 drivers/staging/rdma/ipath/ipath_user_sdma.c create mode 100644 drivers/staging/rdma/ipath/ipath_user_sdma.h create mode 100644 drivers/staging/rdma/ipath/ipath_verbs.c create mode 100644 drivers/staging/rdma/ipath/ipath_verbs.h create mode 100644 drivers/staging/rdma/ipath/ipath_verbs_mcast.c create mode 100644 drivers/staging/rdma/ipath/ipath_wc_ppc64.c create mode 100644 drivers/staging/rdma/ipath/ipath_wc_x86_64.c delete mode 100644 drivers/staging/sm7xxfb/Kconfig delete mode 100644 drivers/staging/sm7xxfb/Makefile delete mode 100644 drivers/staging/sm7xxfb/TODO delete mode 100644 drivers/staging/sm7xxfb/sm7xx.h delete mode 100644 drivers/staging/sm7xxfb/sm7xxfb.c delete mode 100644 drivers/staging/wilc1000/coreconfigsimulator.h delete mode 100644 drivers/staging/wilc1000/fifo_buffer.c delete mode 100644 drivers/staging/wilc1000/fifo_buffer.h delete mode 100644 drivers/staging/wilc1000/wilc_log.h delete mode 100644 drivers/staging/wilc1000/wilc_osconfig.h delete mode 100644 drivers/staging/wilc1000/wilc_sleep.c delete mode 100644 drivers/staging/wilc1000/wilc_sleep.h delete mode 100644 drivers/staging/wilc1000/wilc_strutils.c delete mode 100644 drivers/staging/wilc1000/wilc_strutils.h delete mode 100644 drivers/staging/wilc1000/wilc_timer.c delete mode 100644 drivers/staging/wilc1000/wilc_timer.h delete mode 100644 drivers/staging/wilc1000/wilc_type.h delete mode 100644 drivers/staging/wilc1000/wilc_wfi_netdevice.c delete mode 100644 drivers/staging/xgifb/vb_util.c create mode 100644 drivers/thermal/intel_pch_thermal.c create mode 100644 drivers/tty/serial/8250/8250_port.c delete mode 100644 drivers/usb/gadget/udc/gadget_chips.h create mode 100644 drivers/usb/musb/sunxi.c create mode 100644 drivers/usb/phy/phy-qcom-8x16-usb.c create mode 100644 drivers/video/backlight/pm8941-wled.c create mode 100644 drivers/video/fbdev/sm712.h create mode 100644 drivers/video/fbdev/sm712fb.c create mode 100644 drivers/watchdog/lpc18xx_wdt.c create mode 100644 drivers/watchdog/sama5d4_wdt.c create mode 100644 drivers/xen/xenfs/xensyms.c diff --git a/drivers/Kconfig b/drivers/Kconfig index 6e973b8e3..46b4a8e0f 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig" source
"drivers/mcb/Kconfig" +source "drivers/perf/Kconfig" + source "drivers/ras/Kconfig" source "drivers/thunderbolt/Kconfig" @@ -184,4 +186,6 @@ source "drivers/android/Kconfig" source "drivers/nvdimm/Kconfig" +source "drivers/nvmem/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index b64b49f6e..b250b36b5 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -11,7 +11,7 @@ obj-y += bus/ obj-$(CONFIG_GENERIC_PHY) += phy/ # GPIO must come after pinctrl as gpios may need to mux pins etc -obj-y += pinctrl/ +obj-$(CONFIG_PINCTRL) += pinctrl/ obj-y += gpio/ obj-y += pwm/ obj-$(CONFIG_PCI) += pci/ @@ -161,7 +161,9 @@ obj-$(CONFIG_NTB) += ntb/ obj-$(CONFIG_FMC) += fmc/ obj-$(CONFIG_POWERCAP) += powercap/ obj-$(CONFIG_MCB) += mcb/ +obj-$(CONFIG_PERF_EVENTS) += perf/ obj-$(CONFIG_RAS) += ras/ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ obj-$(CONFIG_ANDROID) += android/ +obj-$(CONFIG_NVMEM) += nvmem/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 114cf4808..5d1015c26 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -189,17 +189,24 @@ config ACPI_DOCK This driver supports ACPI-controlled docking stations and removable drive bays such as the IBM Ultrabay and the Dell Module Bay. -config ACPI_PROCESSOR - tristate "Processor" +config ACPI_CPU_FREQ_PSS + bool select THERMAL + +config ACPI_PROCESSOR_IDLE + bool select CPU_IDLE + +config ACPI_PROCESSOR + tristate "Processor" depends on X86 || IA64 + select ACPI_PROCESSOR_IDLE + select ACPI_CPU_FREQ_PSS default y help - This driver installs ACPI as the idle handler for Linux and uses - ACPI C2 and C3 processor states to save power on systems that - support it. It is required by several flavors of cpufreq - performance-state drivers. + This driver adds support for the ACPI Processor package. It is required + by several flavors of cpufreq performance-state, thermal, throttling and + idle drivers. To compile this driver as a module, choose M here: the module will be called processor. @@ -410,6 +417,7 @@ config ACPI_NFIT tristate "ACPI NVDIMM Firmware Interface Table (NFIT)" depends on PHYS_ADDR_T_64BIT depends on BLK_DEV + depends on ARCH_HAS_MMIO_FLUSH select LIBNVDIMM help Infrastructure to probe ACPI 6 compliant platforms for diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 8321430d7..b5e7cd8a9 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -24,7 +24,7 @@ acpi-y += nvs.o # Power management related files acpi-y += wakeup.o acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o -acpi-y += device_pm.o +acpi-y += device_sysfs.o device_pm.o acpi-$(CONFIG_ACPI_SLEEP) += proc.o @@ -80,8 +80,10 @@ obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o obj-$(CONFIG_ACPI_BGRT) += bgrt.o # processor has its own "processor." module_param namespace -processor-y := processor_driver.o processor_throttling.o -processor-y += processor_idle.o processor_thermal.o +processor-y := processor_driver.o +processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o +processor-$(CONFIG_ACPI_CPU_FREQ_PSS) += processor_throttling.o \ + processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 9b5354a2c..f71b756b0 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 3984ea96e..a450e7af8 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include "internal.h" diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index ac0f52f6d..f77956c3f 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 46b58abb0..f51bd0d0b 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -11,7 +11,6 @@ */ #include -#include #include #include #include @@ -60,6 +59,7 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_CLK_DIVIDER BIT(2) #define LPSS_LTR BIT(3) #define LPSS_SAVE_CTX BIT(4) +#define LPSS_NO_D3_DELAY BIT(5) struct lpss_private_data; @@ -156,6 +156,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, }; +static const struct lpss_device_desc bsw_pwm_dev_desc = { + .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, +}; + static const struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .clk_con_id = "baudclk", @@ -163,6 +167,14 @@ static const struct lpss_device_desc byt_uart_dev_desc = { .setup = lpss_uart_setup, }; +static const struct lpss_device_desc bsw_uart_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX + | LPSS_NO_D3_DELAY, + .clk_con_id = "baudclk", + .prv_offset = 0x800, + .setup = lpss_uart_setup, +}; + static const struct lpss_device_desc byt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, @@ -178,8 +190,15 @@ static const struct lpss_device_desc byt_i2c_dev_desc = { .setup = byt_i2c_setup, }; +static const struct lpss_device_desc bsw_i2c_dev_desc = { + .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, + .setup = byt_i2c_setup, +}; + static struct lpss_device_desc bsw_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX + | LPSS_NO_D3_DELAY, .prv_offset = 0x400, .setup = lpss_deassert_reset, }; @@ -214,11 +233,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT33FC", }, /* Braswell LPSS devices */ - { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, - { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, + { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, + { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, - { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, + { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, + /* Broadwell LPSS devices */ { "INT3430", LPSS_ADDR(lpt_dev_desc) }, { "INT3431", LPSS_ADDR(lpt_dev_desc) }, { "INT3432", 
LPSS_ADDR(lpt_i2c_dev_desc) }, @@ -558,9 +578,14 @@ static void acpi_lpss_restore_ctx(struct device *dev, * The following delay is needed or the subsequent write operations may * fail. The LPSS devices are actually PCI devices and the PCI spec * expects 10ms delay before the device can be accessed after D3 to D0 - * transition. + * transition. However, some platforms such as BSW do not need this delay. */ - msleep(10); + unsigned int delay = 10; /* default 10ms delay */ + + if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY) + delay = 0; + + msleep(delay); for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { unsigned long offset = i * sizeof(u32); diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index ee28f4d15..6b0d3ef73 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -16,11 +16,6 @@ * NON INFRINGEMENT. See the GNU General Public License for more * details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * * ACPI based HotPlug driver that supports Memory Hotplug * This driver fields notifications from firmware for memory add * and remove operations and alerts the VM of the affected memory diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 00b39802d..ae307ff36 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -12,10 +12,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * */ #include diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index fb765524c..c58940b23 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -19,8 +19,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ /* floppy */ {"PNP0700"}, - /* ipmi_si */ - {"IPI0001"}, /* tpm_inf_pnp */ {"IFX0101"}, /* Infineon TPMs */ {"IFX0102"}, /* Infineon TPMs */ diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 92a5f738e..985b8a831 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -485,7 +485,7 @@ static const struct acpi_device_id processor_device_ids[] = { { } }; -static struct acpi_scan_handler __refdata processor_handler = { +static struct acpi_scan_handler processor_handler = { .ids = processor_device_ids, .attach = acpi_processor_add, #ifdef CONFIG_ACPI_HOTPLUG_CPU diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 8c2fe2f2f..5778e8e43 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
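/*
 * Editor's sketch (not part of the patch): the acpi_lpss.c hunk above
 * replaces the unconditional msleep(10) on D3->D0 resume with a
 * per-device check of the new LPSS_NO_D3_DELAY flag. The standalone
 * types and helper below are simplified stand-ins, assuming only what
 * the hunk itself shows.
 */
#include <stdio.h>

#define LPSS_NO_D3_DELAY (1u << 5)	/* mirrors BIT(5) in the hunk */

struct lpss_device_desc {
	unsigned int flags;
};

/* 10 ms is the PCI-mandated D3->D0 settle time; BSW-style parts skip it */
static unsigned int lpss_resume_delay_ms(const struct lpss_device_desc *desc)
{
	return (desc->flags & LPSS_NO_D3_DELAY) ? 0 : 10;
}

int main(void)
{
	struct lpss_device_desc byt = { .flags = 0 };
	struct lpss_device_desc bsw = { .flags = LPSS_NO_D3_DELAY };

	printf("BYT delay: %u ms, BSW delay: %u ms\n",
	       lpss_resume_delay_ms(&byt), lpss_resume_delay_ms(&bsw));
	return 0;
}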
- * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index c1a963581..fedcc16b5 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -11,6 +11,7 @@ obj-y += acpi.o acpi-y := \ dsargs.o \ dscontrol.o \ + dsdebug.o \ dsfield.o \ dsinit.o \ dsmethod.o \ @@ -164,6 +165,7 @@ acpi-y += \ utmath.o \ utmisc.o \ utmutex.o \ + utnonansi.o \ utobject.o \ utosi.o \ utownerid.o \ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 43685dd36..eb2e926d8 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -67,9 +67,6 @@ struct acpi_db_execute_walk { }; #define PARAM_LIST(pl) pl -#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) -#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ - acpi_os_printf PARAM_LIST(fp);} #define EX_NO_SINGLE_STEP 1 #define EX_SINGLE_STEP 2 @@ -77,10 +74,6 @@ struct acpi_db_execute_walk { /* * dbxface - external debugger interfaces */ -acpi_status acpi_db_initialize(void); - -void acpi_db_terminate(void); - acpi_status acpi_db_single_step(struct acpi_walk_state *walk_state, union acpi_parse_object *op, u32 op_type); @@ -102,6 +95,8 @@ void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); acpi_status acpi_db_sleep(char *object_arg); +void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg); + void acpi_db_display_locks(void); void acpi_db_display_resources(char *object_arg); @@ -261,6 +256,23 @@ acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op); char *acpi_db_get_next_token(char *string, char **next, acpi_object_type * return_type); +/* + * dbobject + */ +void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc); + +void +acpi_db_display_internal_object(union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void acpi_db_decode_arguments(struct acpi_walk_state *walk_state); + +void acpi_db_decode_locals(struct acpi_walk_state *walk_state); + +void +acpi_db_dump_method_info(acpi_status status, + struct acpi_walk_state *walk_state); + /* * dbstats - Generation and display of ACPI table statistics */ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 408f04bca..7094dc89e 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -354,4 +354,12 @@ acpi_status acpi_ds_result_push(union acpi_operand_object *object, struct acpi_walk_state *walk_state); +/* + * dsdebug - parser debugging routines + */ +void +acpi_ds_dump_method_stack(acpi_status status, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + #endif /* _ACDISPAT_H_ */ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 53f96a370..4dde37c3d 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -58,11 +58,13 @@ ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list); ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT); ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); +ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); #if (!ACPI_REDUCED_HARDWARE) ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); -ACPI_GLOBAL(struct acpi_table_facs *, 
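/*
 * Editor's sketch: the acglobal.h hunk above drops the fixed table slots
 * (DSDT=0, FACS=1, X_FACS=2) in favor of per-table index globals that
 * start out as ACPI_INVALID_TABLE_INDEX and are filled in once the table
 * is actually found. A minimal model of that sentinel pattern, with
 * simplified names; the evxfevnt.c hunk further down tests the FADT
 * index exactly this way in place of the removed acpi_tb_tables_loaded().
 */
#include <stdio.h>

#define INVALID_TABLE_INDEX 0xFFFFFFFFu

static unsigned int fadt_index = INVALID_TABLE_INDEX;

/* stands in for the old acpi_tb_tables_loaded() helper */
static int tables_loaded(void)
{
	return fadt_index != INVALID_TABLE_INDEX;
}

int main(void)
{
	printf("loaded? %d\n", tables_loaded());	/* 0: not yet found */
	fadt_index = 2;		/* set while parsing the root table */
	printf("loaded? %d\n", tables_loaded());	/* 1 */
	return 0;
}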
acpi_gbl_facs32); -ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64); #endif /* !ACPI_REDUCED_HARDWARE */ @@ -235,6 +237,10 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0); ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list); +/* Maximum number of While() loop iterations before forced abort */ + +ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations); + /* Control method single step flag */ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step); @@ -290,8 +296,6 @@ ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]); ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level); ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer); -ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level); -ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer); /***************************************************************************** * @@ -309,9 +313,10 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE); ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE); -ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm); -ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose); +ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm); +ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing); ACPI_GLOBAL(u8, acpi_gbl_num_external_methods); ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods); ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list); @@ -346,8 +351,8 @@ ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]); /* * Statistic globals */ -ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]); -ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]); +ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]); +ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]); ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc); ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc); ACPI_GLOBAL(u32, acpi_gbl_num_nodes); diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 7ac98000b..e820ed8f1 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -131,6 +131,28 @@ void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index); +void +acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void +acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state); + +void +acpi_ex_start_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state); + +void +acpi_ex_stop_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state); + +void +acpi_ex_trace_point(acpi_trace_event_type type, + u8 begin, u8 *aml, char *pathname); + /* * exfield - ACPI AML (p-code) execution - field manipulation */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index bc600969c..6f708267a 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -174,8 +174,12 @@ struct acpi_namespace_node { */ #ifdef ACPI_LARGE_NAMESPACE_NODE union acpi_parse_object *op; + void *method_locals; + void *method_args; u32 value; u32 length; + u8 arg_count; + #endif }; @@ -209,11 +213,9 @@ struct acpi_table_list { #define ACPI_ROOT_ORIGIN_ALLOCATED (1) #define ACPI_ROOT_ALLOW_RESIZE (2) -/* Predefined (fixed) table indexes */ +/* Predefined table indexes */ -#define 
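/*
 * Editor's sketch: acpi_gbl_max_loop_iterations, added above, turns the
 * interpreter's infinite-While() guard from a compile-time constant into
 * a runtime-tunable limit; the dscontrol.c hunk further down compares the
 * loop counter against it. Simplified stand-ins below.
 */
#include <stdio.h>

static unsigned short max_loop_iterations = 0xFFFF;	/* tunable limit */

static int run_while(int (*cond)(void))
{
	unsigned int count = 0;

	while (cond()) {
		if (++count > max_loop_iterations)
			return -1;	/* ACPICA raises AE_AML_INFINITE_LOOP */
	}
	return 0;
}

static int always_true(void)
{
	return 1;
}

int main(void)
{
	if (run_while(always_true) < 0)
		printf("aborted runaway While() after %u iterations\n",
		       (unsigned int)max_loop_iterations);
	return 0;
}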
ACPI_TABLE_INDEX_DSDT (0) -#define ACPI_TABLE_INDEX_FACS (1) -#define ACPI_TABLE_INDEX_X_FACS (2) +#define ACPI_INVALID_TABLE_INDEX (0xFFFFFFFF) struct acpi_find_context { char *search_for; @@ -404,6 +406,13 @@ struct acpi_simple_repair_info { #define ACPI_NUM_RTYPES 5 /* Number of actual object types */ +/* Info for running the _REG methods */ + +struct acpi_reg_walk_info { + acpi_adr_space_type space_id; + u32 reg_run_count; +}; + /***************************************************************************** * * Event typedefs and structs @@ -715,7 +724,7 @@ union acpi_parse_value { union acpi_parse_object *arg; /* arguments and contained ops */ }; -#ifdef ACPI_DISASSEMBLER +#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT) #define ACPI_DISASM_ONLY_MEMBERS(a) a; #else #define ACPI_DISASM_ONLY_MEMBERS(a) @@ -726,7 +735,7 @@ union acpi_parse_value { u8 descriptor_type; /* To differentiate various internal objs */\ u8 flags; /* Type of Op */\ u16 aml_opcode; /* AML opcode */\ - u32 aml_offset; /* Offset of declaration in AML */\ + u8 *aml; /* Address of declaration in AML */\ union acpi_parse_object *next; /* Next op */\ struct acpi_namespace_node *node; /* For use by interpreter */\ union acpi_parse_value value; /* Value or args associated with the opcode */\ @@ -1103,6 +1112,9 @@ struct acpi_db_method_info { * Index of current thread inside all them created. */ char init_args; +#ifdef ACPI_DEBUGGER + acpi_object_type arg_types[4]; +#endif char *arguments[4]; char num_threads_str[11]; char id_of_thread_str[11]; @@ -1119,6 +1131,10 @@ struct acpi_integrity_info { #define ACPI_DB_CONSOLE_OUTPUT 0x02 #define ACPI_DB_DUPLICATE_OUTPUT 0x03 +struct acpi_object_info { + u32 types[ACPI_TOTAL_TYPES]; +}; + /***************************************************************************** * * Debug diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index c240bdf82..e85366ceb 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -220,6 +220,15 @@ #define ACPI_MUL_32(a) _ACPI_MUL(a, 5) #define ACPI_MOD_32(a) _ACPI_MOD(a, 32) +/* Test for ASCII character */ + +#define ACPI_IS_ASCII(c) ((c) < 0x80) + +/* Signed integers */ + +#define ACPI_SIGN_POSITIVE 0 +#define ACPI_SIGN_NEGATIVE 1 + /* * Rounding macros (Power of two boundaries only) */ diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 0dd088290..ea0d9076d 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -272,17 +272,20 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, */ u32 acpi_ns_opens_scope(acpi_object_type type); -acpi_status -acpi_ns_build_external_path(struct acpi_namespace_node *node, - acpi_size size, char *name_buffer); - char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node); +u32 +acpi_ns_build_normalized_path(struct acpi_namespace_node *node, + char *full_path, u32 path_size, u8 no_trailing); + +char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, + u8 no_trailing); + char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state); acpi_status acpi_ns_handle_to_pathname(acpi_handle target_handle, - struct acpi_buffer *buffer); + struct acpi_buffer *buffer, u8 no_trailing); u8 acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for); diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index c81d98d09..0bd02c4a5 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -176,6 +176,7 @@ 
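/*
 * Editor's sketch: the aclocal.h hunk above stores the AML address
 * (u8 *aml) in each parse object instead of a u32 aml_offset; an offset
 * is now derived on demand by pointer subtraction from the table's AML
 * start (see the ACPI_PTR_DIFF() use in the dsmethod.c hunk below).
 * Minimal stand-in types:
 */
#include <stdio.h>

struct parse_op {
	unsigned short opcode;
	const unsigned char *aml;	/* address of declaration in AML */
};

static unsigned int aml_offset(const struct parse_op *op,
			       const unsigned char *aml_start)
{
	return (unsigned int)(op->aml - aml_start);
}

int main(void)
{
	unsigned char table[64] = { 0 };
	struct parse_op op = { .opcode = 0x14, .aml = &table[36] };

	printf("opcode 0x%02X at AML offset %u\n", op.opcode,
	       aml_offset(&op, table));
	return 0;
}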
struct acpi_object_method { u8 param_count; u8 sync_level; union acpi_operand_object *mutex; + union acpi_operand_object *node; u8 *aml_start; union { acpi_internal_method implementation; diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 0cdd2fce4..6021ccfb0 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -225,11 +225,11 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *root); /* * psutils - parser utilities */ -union acpi_parse_object *acpi_ps_create_scope_op(void); +union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml); void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode); -union acpi_parse_object *acpi_ps_alloc_op(u16 opcode); +union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml); void acpi_ps_free_op(union acpi_parse_object *op); diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 44997ca02..f9992dced 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -85,7 +85,7 @@ struct acpi_walk_state { u8 namespace_override; /* Override existing objects */ u8 result_size; /* Total elements for the result stack */ u8 result_count; /* Current number of occupied elements of result stack */ - u32 aml_offset; + u8 *aml; u32 arg_types; u32 method_breakpoint; /* For single stepping */ u32 user_breakpoint; /* User AML breakpoint */ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 7e0b6f1be..591ea9531 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); /* * tbfadt - FADT parse/convert/validate */ -void acpi_tb_parse_fadt(u32 table_index); +void acpi_tb_parse_fadt(void); void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); @@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); */ acpi_status acpi_tb_initialize_facs(void); -u8 acpi_tb_tables_loaded(void); - void acpi_tb_print_table_header(acpi_physical_address address, struct acpi_table_header *header); @@ -154,14 +152,20 @@ void acpi_tb_check_dsdt_header(void); struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index); void -acpi_tb_install_table_with_override(u32 table_index, - struct acpi_table_desc *new_table_desc, - u8 override); +acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, + u8 override, u32 *table_index); acpi_status acpi_tb_install_fixed_table(acpi_physical_address address, - char *signature, u32 table_index); + char *signature, u32 *table_index); acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address); +u8 acpi_is_valid_signature(char *signature); + +/* + * tbxfload + */ +acpi_status acpi_tb_load_namespace(void); + #endif /* __ACTABLES_H__ */ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 6de0d3573..fb2aa5066 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -166,6 +166,17 @@ struct acpi_pkg_info { #define DB_DWORD_DISPLAY 4 #define DB_QWORD_DISPLAY 8 +/* + * utnonansi - Non-ANSI C library functions + */ +void acpi_ut_strupr(char *src_string); + +void acpi_ut_strlwr(char *src_string); + +int acpi_ut_stricmp(char *string1, char *string2); + +acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); + /* * utglobal - Global data structures and procedures */ @@ -205,8 +216,6 @@ acpi_status acpi_ut_hardware_initialize(void); void 
acpi_ut_subsystem_shutdown(void); -#define ACPI_IS_ASCII(c) ((c) < 0x80) - /* * utcopy - Object construction and conversion interfaces */ @@ -508,7 +517,7 @@ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status u8 acpi_ut_is_pci_root_bridge(char *id); -#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP) +#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP) u8 acpi_ut_is_aml_table(struct acpi_table_header *table); #endif @@ -567,16 +576,6 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag); /* * utstring - String and character utilities */ -void acpi_ut_strupr(char *src_string); - -#ifdef ACPI_ASL_COMPILER -void acpi_ut_strlwr(char *src_string); - -int acpi_ut_stricmp(char *string1, char *string2); -#endif - -acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); - void acpi_ut_print_string(char *string, u16 max_length); #if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 3e6989738..e2ab59e39 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -86,7 +86,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node, /* Allocate a new parser op to be the root of the parsed tree */ - op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); + op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -129,7 +129,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node, /* Evaluate the deferred arguments */ - op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP); + op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 39da9da62..435fc16e2 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -212,7 +212,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, */ control_state->control.loop_count++; if (control_state->control.loop_count > - ACPI_MAX_LOOP_ITERATIONS) { + acpi_gbl_max_loop_iterations) { status = AE_AML_INFINITE_LOOP; break; } diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c new file mode 100644 index 000000000..309556efc --- /dev/null +++ b/drivers/acpi/acpica/dsdebug.c @@ -0,0 +1,231 @@ +/****************************************************************************** + * + * Module Name: dsdebug - Parser/Interpreter interface - debugging + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2015, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" +#include "acdispat.h" +#include "acnamesp.h" +#ifdef ACPI_DISASSEMBLER +#include "acdisasm.h" +#endif +#include "acinterp.h" + +#define _COMPONENT ACPI_DISPATCHER +ACPI_MODULE_NAME("dsdebug") + +#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) +/* Local prototypes */ +static void +acpi_ds_print_node_pathname(struct acpi_namespace_node *node, + const char *message); + +/******************************************************************************* + * + * FUNCTION: acpi_ds_print_node_pathname + * + * PARAMETERS: node - Object + * message - Prefix message + * + * DESCRIPTION: Print an object's full namespace pathname + * Manages allocation/freeing of a pathname buffer + * + ******************************************************************************/ + +static void +acpi_ds_print_node_pathname(struct acpi_namespace_node *node, + const char *message) +{ + struct acpi_buffer buffer; + acpi_status status; + + ACPI_FUNCTION_TRACE(ds_print_node_pathname); + + if (!node) { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[NULL NAME]")); + return_VOID; + } + + /* Convert handle to full pathname and print it (with supplied message) */ + + buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; + + status = acpi_ns_handle_to_pathname(node, &buffer, TRUE); + if (ACPI_SUCCESS(status)) { + if (message) { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "%s ", + message)); + } + + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[%s] (Node %p)", + (char *)buffer.pointer, node)); + ACPI_FREE(buffer.pointer); + } + + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ds_dump_method_stack + * + * PARAMETERS: status - Method execution status + * walk_state - Current state of the parse tree walk + * op - Executing parse op + * + * RETURN: None + * + * DESCRIPTION: Called when a method has been aborted because of an error. + * Dumps the method execution stack. 
+ * + ******************************************************************************/ + +void +acpi_ds_dump_method_stack(acpi_status status, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) +{ + union acpi_parse_object *next; + struct acpi_thread_state *thread; + struct acpi_walk_state *next_walk_state; + struct acpi_namespace_node *previous_method = NULL; + union acpi_operand_object *method_desc; + + ACPI_FUNCTION_TRACE(ds_dump_method_stack); + + /* Ignore control codes, they are not errors */ + + if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) { + return_VOID; + } + + /* We may be executing a deferred opcode */ + + if (walk_state->deferred_node) { + ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, + "Executing subtree for Buffer/Package/Region\n")); + return_VOID; + } + + /* + * If there is no Thread, we are not actually executing a method. + * This can happen when the iASL compiler calls the interpreter + * to perform constant folding. + */ + thread = walk_state->thread; + if (!thread) { + return_VOID; + } + + /* Display exception and method name */ + + ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, + "\n**** Exception %s during execution of method ", + acpi_format_exception(status))); + acpi_ds_print_node_pathname(walk_state->method_node, NULL); + + /* Display stack of executing methods */ + + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, + "\n\nMethod Execution Stack:\n")); + next_walk_state = thread->walk_state_list; + + /* Walk list of linked walk states */ + + while (next_walk_state) { + method_desc = next_walk_state->method_desc; + if (method_desc) { + acpi_ex_stop_trace_method((struct acpi_namespace_node *) + method_desc->method.node, + method_desc, walk_state); + } + + ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, + " Method [%4.4s] executing: ", + acpi_ut_get_node_name(next_walk_state-> + method_node))); + + /* First method is the currently executing method */ + + if (next_walk_state == walk_state) { + if (op) { + + /* Display currently executing ASL statement */ + + next = op->common.next; + op->common.next = NULL; + +#ifdef ACPI_DISASSEMBLER + acpi_dm_disassemble(next_walk_state, op, + ACPI_UINT32_MAX); +#endif + op->common.next = next; + } + } else { + /* + * This method has called another method + * NOTE: the method call parse subtree is already deleted at this + * point, so we cannot disassemble the method invocation. 
+ */ + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, + "Call to method ")); + acpi_ds_print_node_pathname(previous_method, NULL); + } + + previous_method = next_walk_state->method_node; + next_walk_state = next_walk_state->next; + ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "\n")); + } + + return_VOID; +} + +#else +void +acpi_ds_dump_method_stack(acpi_status status, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op) +{ + return; +} + +#endif diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index 95779e8ec..920f1b199 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -237,12 +237,22 @@ acpi_ds_initialize_objects(u32 table_index, return_ACPI_STATUS(status); } + /* DSDT is always the first AML table */ + + if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) { + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, + "\nInitializing Namespace objects:\n")); + } + + /* Summary of objects initialized */ + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, - "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, " - "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n", - table->signature, owner_id, info.object_count, - info.device_count, info.op_region_count, - info.method_count, info.serial_method_count, + "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, " + "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n", + table->signature, table->oem_table_id, owner_id, + info.object_count, info.device_count, + info.op_region_count, info.method_count, + info.serial_method_count, info.non_serial_method_count, info.serialized_method_count)); diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 85bb95143..bc32f3194 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -46,11 +46,9 @@ #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" -#ifdef ACPI_DISASSEMBLER -#include "acdisasm.h" -#endif #include "acparser.h" #include "amlcode.h" +#include "acdebug.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsmethod") @@ -103,7 +101,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, /* Create/Init a root op for the method parse tree */ - op = acpi_ps_alloc_op(AML_METHOD_OP); + op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -205,7 +203,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state, * RETURN: Status * * DESCRIPTION: Called on method error. Invoke the global exception handler if - * present, dump the method data if the disassembler is configured + * present, dump the method data if the debugger is configured * * Note: Allows the exception handler to change the status code * @@ -214,6 +212,8 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state, acpi_status acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state) { + u32 aml_offset; + ACPI_FUNCTION_ENTRY(); /* Ignore AE_OK and control exception codes */ @@ -234,26 +234,30 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state) * Handler can map the exception code to anything it wants, including * AE_OK, in which case the executing method will not be aborted. */ + aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml, + walk_state->parser_state. + aml_start); + status = acpi_gbl_exception_handler(status, walk_state->method_node ? 
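/*
 * Editor's sketch: at its core, acpi_ds_dump_method_stack() above walks
 * the thread's singly linked list of walk states, innermost first, and
 * prints one line per in-flight method. A minimal model; the method
 * names are invented for illustration.
 */
#include <stdio.h>

struct walk_frame {
	const char *method_name;
	const struct walk_frame *next;	/* the caller's frame */
};

static void dump_method_stack(const struct walk_frame *top)
{
	const struct walk_frame *f;

	for (f = top; f; f = f->next)
		printf("  Method [%s] executing\n", f->method_name);
}

int main(void)
{
	struct walk_frame outer = { "\\_SB.PCI0._INI", NULL };
	struct walk_frame inner = { "\\_SB.PCI0.HLPR", &outer };

	printf("Method Execution Stack:\n");
	dump_method_stack(&inner);	/* currently executing method first */
	return 0;
}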
walk_state->method_node-> name.integer : 0, walk_state->opcode, - walk_state->aml_offset, - NULL); + aml_offset, NULL); acpi_ex_enter_interpreter(); } acpi_ds_clear_implicit_return(walk_state); -#ifdef ACPI_DISASSEMBLER if (ACPI_FAILURE(status)) { + acpi_ds_dump_method_stack(status, walk_state, walk_state->op); - /* Display method locals/args if disassembler is present */ + /* Display method locals/args if debugger is present */ - acpi_dm_dump_method_info(status, walk_state, walk_state->op); - } +#ifdef ACPI_DEBUGGER + acpi_db_dump_method_info(status, walk_state); #endif + } return (status); } @@ -328,6 +332,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, return_ACPI_STATUS(AE_NULL_ENTRY); } + acpi_ex_start_trace_method(method_node, obj_desc, walk_state); + /* Prevent wraparound of thread count */ if (obj_desc->method.thread_count == ACPI_UINT8_MAX) { @@ -574,9 +580,7 @@ cleanup: /* On error, we must terminate the method properly */ acpi_ds_terminate_control_method(obj_desc, next_walk_state); - if (next_walk_state) { - acpi_ds_delete_walk_state(next_walk_state); - } + acpi_ds_delete_walk_state(next_walk_state); return_ACPI_STATUS(status); } @@ -826,5 +830,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, } } + acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc-> + method.node, method_desc, walk_state); + return_VOID; } diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index ea0cc4e08..81d7b9863 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -480,8 +480,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, union acpi_operand_object **operand; struct acpi_namespace_node *node; union acpi_parse_object *next_op; - u32 table_index; struct acpi_table_header *table; + u32 table_index; ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); @@ -504,6 +504,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } + operand = &walk_state->operands[0]; + /* * Resolve the Signature string, oem_id string, * and oem_table_id string operands @@ -511,32 +513,34 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, status = acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, walk_state); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + goto cleanup; } - operand = &walk_state->operands[0]; - /* Find the ACPI table */ status = acpi_tb_find_table(operand[0]->string.pointer, operand[1]->string.pointer, operand[2]->string.pointer, &table_index); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + if (status == AE_NOT_FOUND) { + ACPI_ERROR((AE_INFO, + "ACPI Table [%4.4s] OEM:(%s, %s) not found in RSDT/XSDT", + operand[0]->string.pointer, + operand[1]->string.pointer, + operand[2]->string.pointer)); + } + goto cleanup; } - acpi_ut_remove_reference(operand[0]); - acpi_ut_remove_reference(operand[1]); - acpi_ut_remove_reference(operand[2]); - status = acpi_get_table_by_index(table_index, &table); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + goto cleanup; } obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { - return_ACPI_STATUS(AE_NOT_EXIST); + status = AE_NOT_EXIST; + goto cleanup; } obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); @@ -551,6 +555,11 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, obj_desc->region.flags |= AOPOBJ_DATA_VALID; +cleanup: + acpi_ut_remove_reference(operand[0]); + 
acpi_ut_remove_reference(operand[1]); + acpi_ut_remove_reference(operand[2]); + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 845ff4491..097188a6b 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -388,7 +388,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, /* Create a new op */ - op = acpi_ps_alloc_op(walk_state->opcode); + op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index fcaa30c61..e2c08cd79 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -335,7 +335,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, /* Create a new op */ - op = acpi_ps_alloc_op(walk_state->opcode); + op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 2ba28a63f..5ee79a16f 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -626,9 +626,17 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id) { acpi_status status; + struct acpi_reg_walk_info info; ACPI_FUNCTION_TRACE(ev_execute_reg_methods); + info.space_id = space_id; + info.reg_run_count = 0; + + ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, + " Running _REG methods for SpaceId %s\n", + acpi_ut_get_region_name(info.space_id))); + /* * Run all _REG methods for all Operation Regions for this space ID. This * is a separate walk in order to handle any interdependencies between @@ -637,7 +645,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, - NULL, &space_id, NULL); + NULL, &info, NULL); /* Special case for EC: handle "orphan" _REG methods with no region */ @@ -645,6 +653,11 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_ev_orphan_ec_reg_method(node); } + ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, + " Executed %u _REG methods for SpaceId %s\n", + info.reg_run_count, + acpi_ut_get_region_name(info.space_id))); + return_ACPI_STATUS(status); } @@ -664,10 +677,10 @@ acpi_ev_reg_run(acpi_handle obj_handle, { union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; - acpi_adr_space_type space_id; acpi_status status; + struct acpi_reg_walk_info *info; - space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context); + info = ACPI_CAST_PTR(struct acpi_reg_walk_info, context); /* Convert and validate the device handle */ @@ -696,13 +709,14 @@ acpi_ev_reg_run(acpi_handle obj_handle, /* Object is a Region */ - if (obj_desc->region.space_id != space_id) { + if (obj_desc->region.space_id != info->space_id) { /* This region is for a different address space, just ignore it */ return (AE_OK); } + info->reg_run_count++; status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT); return (status); } diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index faad911d4..10ce48e16 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -71,7 +71,7 @@ acpi_status acpi_enable(void) /* ACPI tables must be present */ - if (!acpi_tb_tables_loaded()) { + if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } diff --git a/drivers/acpi/acpica/exconfig.c 
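/*
 * Editor's sketch: the evregion.c hunk above swaps the bare space_id
 * pointer passed through the namespace walk for the new struct
 * acpi_reg_walk_info (declared in the aclocal.h hunk earlier), so the
 * acpi_ev_reg_run() callback can also count executed _REG methods.
 * Simplified model of that context-struct pattern:
 */
#include <stdio.h>

struct reg_walk_info {
	int space_id;
	unsigned int reg_run_count;
};

/* per-node walk callback: run _REG for regions in the requested space */
static void reg_run(int region_space_id, void *context)
{
	struct reg_walk_info *info = context;

	if (region_space_id != info->space_id)
		return;			/* other address space: ignore */

	info->reg_run_count++;		/* new: tally for the debug summary */
	/* ... would invoke the region's _REG method here ... */
}

int main(void)
{
	int region_spaces[] = { 0, 3, 3, 1 };	/* sample namespace walk */
	struct reg_walk_info info = { .space_id = 3, .reg_run_count = 0 };
	unsigned int i;

	for (i = 0; i < sizeof(region_spaces) / sizeof(region_spaces[0]); i++)
		reg_run(region_spaces[i], &info);

	printf("Executed %u _REG methods for SpaceId %d\n",
	       info.reg_run_count, info.space_id);
	return 0;
}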
b/drivers/acpi/acpica/exconfig.c index 24a4c5c2b..b540913c1 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -162,14 +162,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, ACPI_FUNCTION_TRACE(ex_load_table_op); - /* Validate lengths for the Signature, oem_id, and oem_table_id strings */ - - if ((operand[0]->string.length > ACPI_NAME_SIZE) || - (operand[1]->string.length > ACPI_OEM_ID_SIZE) || - (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) { - return_ACPI_STATUS(AE_AML_STRING_LIMIT); - } - /* Find the ACPI table in the RSDT/XSDT */ status = acpi_tb_find_table(operand[0]->string.pointer, diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index aaeea4840..ccb7219bd 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -486,6 +486,7 @@ acpi_ex_create_method(u8 * aml_start, obj_desc->method.aml_start = aml_start; obj_desc->method.aml_length = aml_length; + obj_desc->method.node = operand[0]; /* * Disassemble the method flags. Split off the arg_count, Serialized diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index 815442bbd..de9245823 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -43,11 +43,21 @@ #include #include "accommon.h" +#include "acnamesp.h" #include "acinterp.h" +#include "acparser.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exdebug") +static union acpi_operand_object *acpi_gbl_trace_method_object = NULL; + +/* Local prototypes */ + +#ifdef ACPI_DEBUG_OUTPUT +static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type); +#endif + #ifndef ACPI_NO_ERROR_MESSAGES /******************************************************************************* * @@ -70,6 +80,7 @@ ACPI_MODULE_NAME("exdebug") * enabled if necessary. * ******************************************************************************/ + void acpi_ex_do_debug_object(union acpi_operand_object *source_desc, u32 level, u32 index) @@ -308,3 +319,316 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, return_VOID; } #endif + +/******************************************************************************* + * + * FUNCTION: acpi_ex_interpreter_trace_enabled + * + * PARAMETERS: name - Whether method name should be matched, + * this should be checked before starting + * the tracer + * + * RETURN: TRUE if interpreter trace is enabled. + * + * DESCRIPTION: Check whether interpreter trace is enabled + * + ******************************************************************************/ + +static u8 acpi_ex_interpreter_trace_enabled(char *name) +{ + + /* Check if tracing is enabled */ + + if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) { + return (FALSE); + } + + /* + * Check if tracing is filtered: + * + * 1. If the tracer is started, acpi_gbl_trace_method_object should have + * been filled by the trace starter + * 2. If the tracer is not started, acpi_gbl_trace_method_name should be + * matched if it is specified + * 3. 
If the tracer is oneshot style, acpi_gbl_trace_method_name should + * not be cleared by the trace stopper during the first match + */ + if (acpi_gbl_trace_method_object) { + return (TRUE); + } + if (name && + (acpi_gbl_trace_method_name && + strcmp(acpi_gbl_trace_method_name, name))) { + return (FALSE); + } + if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) && + !acpi_gbl_trace_method_name) { + return (FALSE); + } + + return (TRUE); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_get_trace_event_name + * + * PARAMETERS: type - Trace event type + * + * RETURN: Trace event name. + * + * DESCRIPTION: Used to obtain the full trace event name. + * + ******************************************************************************/ + +#ifdef ACPI_DEBUG_OUTPUT + +static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type) +{ + switch (type) { + case ACPI_TRACE_AML_METHOD: + + return "Method"; + + case ACPI_TRACE_AML_OPCODE: + + return "Opcode"; + + case ACPI_TRACE_AML_REGION: + + return "Region"; + + default: + + return ""; + } +} + +#endif + +/******************************************************************************* + * + * FUNCTION: acpi_ex_trace_point + * + * PARAMETERS: type - Trace event type + * begin - TRUE if before execution + * aml - Executed AML address + * pathname - Object path + * + * RETURN: None + * + * DESCRIPTION: Internal interpreter execution trace. + * + ******************************************************************************/ + +void +acpi_ex_trace_point(acpi_trace_event_type type, + u8 begin, u8 *aml, char *pathname) +{ + + ACPI_FUNCTION_NAME(ex_trace_point); + + if (pathname) { + ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, + "%s %s [0x%p:%s] execution.\n", + acpi_ex_get_trace_event_name(type), + begin ? "Begin" : "End", aml, pathname)); + } else { + ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, + "%s %s [0x%p] execution.\n", + acpi_ex_get_trace_event_name(type), + begin ? "Begin" : "End", aml)); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_start_trace_method + * + * PARAMETERS: method_node - Node of the method + * obj_desc - The method object + * walk_state - current state, NULL if not yet executing + * a method. 
+ * + * RETURN: None + * + * DESCRIPTION: Start control method execution trace + * + ******************************************************************************/ + +void +acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) +{ + acpi_status status; + char *pathname = NULL; + u8 enabled = FALSE; + + ACPI_FUNCTION_NAME(ex_start_trace_method); + + if (method_node) { + pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit; + } + + enabled = acpi_ex_interpreter_trace_enabled(pathname); + if (enabled && !acpi_gbl_trace_method_object) { + acpi_gbl_trace_method_object = obj_desc; + acpi_gbl_original_dbg_level = acpi_dbg_level; + acpi_gbl_original_dbg_layer = acpi_dbg_layer; + acpi_dbg_level = ACPI_TRACE_LEVEL_ALL; + acpi_dbg_layer = ACPI_TRACE_LAYER_ALL; + + if (acpi_gbl_trace_dbg_level) { + acpi_dbg_level = acpi_gbl_trace_dbg_level; + } + if (acpi_gbl_trace_dbg_layer) { + acpi_dbg_layer = acpi_gbl_trace_dbg_layer; + } + } + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + +exit: + if (enabled) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE, + obj_desc ? obj_desc->method.aml_start : NULL, + pathname); + } + if (pathname) { + ACPI_FREE(pathname); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_stop_trace_method + * + * PARAMETERS: method_node - Node of the method + * obj_desc - The method object + * walk_state - current state, NULL if not yet executing + * a method. + * + * RETURN: None + * + * DESCRIPTION: Stop control method execution trace + * + ******************************************************************************/ + +void +acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, + union acpi_operand_object *obj_desc, + struct acpi_walk_state *walk_state) +{ + acpi_status status; + char *pathname = NULL; + u8 enabled; + + ACPI_FUNCTION_NAME(ex_stop_trace_method); + + if (method_node) { + pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit_path; + } + + enabled = acpi_ex_interpreter_trace_enabled(NULL); + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + + if (enabled) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE, + obj_desc ? obj_desc->method.aml_start : NULL, + pathname); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit_path; + } + + /* Check whether the tracer should be stopped */ + + if (acpi_gbl_trace_method_object == obj_desc) { + + /* Disable further tracing if type is one-shot */ + + if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) { + acpi_gbl_trace_method_name = NULL; + } + + acpi_dbg_level = acpi_gbl_original_dbg_level; + acpi_dbg_layer = acpi_gbl_original_dbg_layer; + acpi_gbl_trace_method_object = NULL; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + +exit_path: + if (pathname) { + ACPI_FREE(pathname); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_start_trace_opcode + * + * PARAMETERS: op - The parser opcode object + * walk_state - current state, NULL if not yet executing + * a method. 
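/*
 * Editor's sketch: the tracing entry points above reduce to one idea --
 * emit a paired Begin/End event around each traced method or opcode.
 * A minimal model of acpi_ex_trace_point()'s output format; the method
 * path below is invented for illustration.
 */
#include <stdio.h>

enum trace_event { TRACE_METHOD, TRACE_OPCODE };

static void trace_point(enum trace_event type, int begin,
			const void *aml, const char *pathname)
{
	const char *name = (type == TRACE_METHOD) ? "Method" : "Opcode";

	if (pathname)
		printf("%s %s [%p:%s] execution.\n", name,
		       begin ? "Begin" : "End", aml, pathname);
	else
		printf("%s %s [%p] execution.\n", name,
		       begin ? "Begin" : "End", aml);
}

int main(void)
{
	unsigned char aml[8] = { 0 };

	trace_point(TRACE_METHOD, 1, aml, "\\_SB.FOO");
	/* ... the interpreter would execute the method body here ... */
	trace_point(TRACE_METHOD, 0, aml, "\\_SB.FOO");
	return 0;
}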
+ * + * RETURN: None + * + * DESCRIPTION: Start opcode execution trace + * + ******************************************************************************/ + +void +acpi_ex_start_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state) +{ + + ACPI_FUNCTION_NAME(ex_start_trace_opcode); + + if (acpi_ex_interpreter_trace_enabled(NULL) && + (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE, + op->common.aml, op->common.aml_op_name); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_stop_trace_opcode + * + * PARAMETERS: op - The parser opcode object + * walk_state - current state, NULL if not yet executing + * a method. + * + * RETURN: None + * + * DESCRIPTION: Stop opcode execution trace + * + ******************************************************************************/ + +void +acpi_ex_stop_trace_opcode(union acpi_parse_object *op, + struct acpi_walk_state *walk_state) +{ + + ACPI_FUNCTION_NAME(ex_stop_trace_opcode); + + if (acpi_ex_interpreter_trace_enabled(NULL) && + (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) { + ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE, + op->common.aml, op->common.aml_op_name); + } +} diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 401e7edcd..d836f888b 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -995,9 +995,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc) if (obj_desc->reference.class == ACPI_REFCLASS_NAME) { acpi_os_printf(" %p ", obj_desc->reference.node); - status = - acpi_ns_handle_to_pathname(obj_desc->reference.node, - &ret_buf); + status = acpi_ns_handle_to_pathname(obj_desc->reference.node, + &ret_buf, TRUE); if (ACPI_FAILURE(status)) { acpi_os_printf(" Could not convert name to pathname\n"); } else { diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index c7e3b929a..1b372ef69 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -126,7 +126,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, if (!source_desc) { ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p", node->name.ascii, node)); - return_ACPI_STATUS(AE_AML_NO_OPERAND); + return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE); } /* diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index b6b7f3af2..7b109128b 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -337,8 +337,9 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state, acpi_object_type * return_type, union acpi_operand_object **return_desc) { - union acpi_operand_object *obj_desc = (void *)operand; - struct acpi_namespace_node *node; + union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand); + struct acpi_namespace_node *node = + ACPI_CAST_PTR(struct acpi_namespace_node, operand); acpi_object_type type; acpi_status status; @@ -355,9 +356,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state, case ACPI_DESC_TYPE_NAMED: type = ((struct acpi_namespace_node *)obj_desc)->type; - obj_desc = - acpi_ns_get_attached_object((struct acpi_namespace_node *) - obj_desc); + obj_desc = acpi_ns_get_attached_object(node); /* If we had an Alias node, use the attached object for type info */ @@ -368,6 +367,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state, acpi_namespace_node *) obj_desc); } + + if (!obj_desc) { + ACPI_ERROR((AE_INFO, + "[%4.4s] 
Node is unresolved or uninitialized", + acpi_ut_get_node_name(node))); + return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE); + } break; default: diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 52dfd0d05..d62a61612 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -160,19 +160,8 @@ acpi_set_firmware_waking_vectors(acpi_physical_address physical_address, ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors); - /* If Hardware Reduced flag is set, there is no FACS */ - - if (acpi_gbl_reduced_hardware) { - return_ACPI_STATUS (AE_OK); - } - - if (acpi_gbl_facs32) { - (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32, - physical_address, - physical_address64); - } - if (acpi_gbl_facs64) { - (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64, + if (acpi_gbl_FACS) { + (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS, physical_address, physical_address64); } diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index 80670cb32..7eba578d3 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -274,6 +274,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) acpi_ex_exit_interpreter(); if (ACPI_FAILURE(status)) { + info->return_object = NULL; goto cleanup; } @@ -464,7 +465,8 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj, status = acpi_ns_evaluate(info); - ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n", + ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES, + "Executed module-level code at %p\n", method_obj->method.aml_start)); /* Delete a possible implicit return value (in slack mode) */ diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index bd6cd4a81..14ab83668 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -111,7 +111,21 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) if (ACPI_SUCCESS(status)) { acpi_tb_set_table_loaded_flag(table_index, TRUE); } else { - (void)acpi_tb_release_owner_id(table_index); + /* + * On error, delete any namespace objects created by this table. + * We cannot initialize these objects, so delete them. There are + * a couple of especially bad cases: + * AE_ALREADY_EXISTS - namespace collision. + * AE_NOT_FOUND - the target of a Scope operator does not + * exist. This target of Scope must already exist in the + * namespace, as per the ACPI specification. + */ + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list. 
+ tables[table_index].owner_id); + acpi_tb_release_owner_id(table_index); + + return_ACPI_STATUS(status); } unlock: diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index d293d9748..8934b4edd 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -49,73 +49,6 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") -/******************************************************************************* - * - * FUNCTION: acpi_ns_build_external_path - * - * PARAMETERS: node - NS node whose pathname is needed - * size - Size of the pathname - * *name_buffer - Where to return the pathname - * - * RETURN: Status - * Places the pathname into the name_buffer, in external format - * (name segments separated by path separators) - * - * DESCRIPTION: Generate a full pathaname - * - ******************************************************************************/ -acpi_status -acpi_ns_build_external_path(struct acpi_namespace_node *node, - acpi_size size, char *name_buffer) -{ - acpi_size index; - struct acpi_namespace_node *parent_node; - - ACPI_FUNCTION_ENTRY(); - - /* Special case for root */ - - index = size - 1; - if (index < ACPI_NAME_SIZE) { - name_buffer[0] = AML_ROOT_PREFIX; - name_buffer[1] = 0; - return (AE_OK); - } - - /* Store terminator byte, then build name backwards */ - - parent_node = node; - name_buffer[index] = 0; - - while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) { - index -= ACPI_NAME_SIZE; - - /* Put the name into the buffer */ - - ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name); - parent_node = parent_node->parent; - - /* Prefix name with the path separator */ - - index--; - name_buffer[index] = ACPI_PATH_SEPARATOR; - } - - /* Overwrite final separator with the root prefix character */ - - name_buffer[index] = AML_ROOT_PREFIX; - - if (index != 0) { - ACPI_ERROR((AE_INFO, - "Could not construct external pathname; index=%u, size=%u, Path=%s", - (u32) index, (u32) size, &name_buffer[size])); - - return (AE_BAD_PARAMETER); - } - - return (AE_OK); -} - /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname @@ -130,37 +63,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node, * for error and debug statements. 
* ******************************************************************************/ - char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { - acpi_status status; char *name_buffer; - acpi_size size; ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); - /* Calculate required buffer size based on depth below root */ - - size = acpi_ns_get_pathname_length(node); - if (!size) { - return_PTR(NULL); - } - - /* Allocate a buffer to be returned to caller */ - - name_buffer = ACPI_ALLOCATE_ZEROED(size); - if (!name_buffer) { - ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size)); - return_PTR(NULL); - } - - /* Build the path in the allocated buffer */ - - status = acpi_ns_build_external_path(node, size, name_buffer); - if (ACPI_FAILURE(status)) { - ACPI_FREE(name_buffer); - return_PTR(NULL); - } + name_buffer = acpi_ns_get_normalized_pathname(node, FALSE); return_PTR(name_buffer); } @@ -180,33 +89,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) { acpi_size size; - struct acpi_namespace_node *next_node; ACPI_FUNCTION_ENTRY(); - /* - * Compute length of pathname as 5 * number of name segments. - * Go back up the parent tree to the root - */ - size = 0; - next_node = node; + size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE); - while (next_node && (next_node != acpi_gbl_root_node)) { - if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) { - ACPI_ERROR((AE_INFO, - "Invalid Namespace Node (%p) while traversing namespace", - next_node)); - return (0); - } - size += ACPI_PATH_SEGMENT_LENGTH; - next_node = next_node->parent; - } - - if (!size) { - size = 1; /* Root node case */ - } - - return (size + 1); /* +1 for null string terminator */ + return (size); } /******************************************************************************* @@ -216,6 +104,8 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) * PARAMETERS: target_handle - Handle of named object whose name is * to be found * buffer - Where the pathname is returned + * no_trailing - Remove trailing '_' for each name + * segment * * RETURN: Status, Buffer is filled with pathname if status is AE_OK * @@ -225,7 +115,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) acpi_status acpi_ns_handle_to_pathname(acpi_handle target_handle, - struct acpi_buffer * buffer) + struct acpi_buffer * buffer, u8 no_trailing) { acpi_status status; struct acpi_namespace_node *node; @@ -240,7 +130,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, /* Determine size required for the caller buffer */ - required_size = acpi_ns_get_pathname_length(node); + required_size = + acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); if (!required_size) { return_ACPI_STATUS(AE_BAD_PARAMETER); } @@ -254,8 +145,8 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, /* Build the path in the caller buffer */ - status = - acpi_ns_build_external_path(node, required_size, buffer->pointer); + (void)acpi_ns_build_normalized_path(node, buffer->pointer, + required_size, no_trailing); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -264,3 +155,149 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, (char *)buffer->pointer, (u32) required_size)); return_ACPI_STATUS(AE_OK); } + +/******************************************************************************* + * + * FUNCTION: acpi_ns_build_normalized_path + * + * PARAMETERS: node - Namespace node + * full_path 
- Where the path name is returned + * path_size - Size of returned path name buffer + * no_trailing - Remove trailing '_' from each name segment + * + * RETURN: Return 1 if the AML path is empty, otherwise returning (length + * of pathname + 1) which means the 'FullPath' contains a trailing + * null. + * + * DESCRIPTION: Build and return a full namespace pathname. + * Note that if the size of 'FullPath' isn't large enough to + * contain the namespace node's path name, the actual required + * buffer length is returned, and it should be greater than + * 'PathSize'. So callers are able to check the returning value + * to determine the buffer size of 'FullPath'. + * + ******************************************************************************/ + +u32 +acpi_ns_build_normalized_path(struct acpi_namespace_node *node, + char *full_path, u32 path_size, u8 no_trailing) +{ + u32 length = 0, i; + char name[ACPI_NAME_SIZE]; + u8 do_no_trailing; + char c, *left, *right; + struct acpi_namespace_node *next_node; + + ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node); + +#define ACPI_PATH_PUT8(path, size, byte, length) \ + do { \ + if ((length) < (size)) \ + { \ + (path)[(length)] = (byte); \ + } \ + (length)++; \ + } while (0) + + /* + * Make sure the path_size is correct, so that we don't need to + * validate both full_path and path_size. + */ + if (!full_path) { + path_size = 0; + } + + if (!node) { + goto build_trailing_null; + } + + next_node = node; + while (next_node && next_node != acpi_gbl_root_node) { + if (next_node != node) { + ACPI_PATH_PUT8(full_path, path_size, + AML_DUAL_NAME_PREFIX, length); + } + ACPI_MOVE_32_TO_32(name, &next_node->name); + do_no_trailing = no_trailing; + for (i = 0; i < 4; i++) { + c = name[4 - i - 1]; + if (do_no_trailing && c != '_') { + do_no_trailing = FALSE; + } + if (!do_no_trailing) { + ACPI_PATH_PUT8(full_path, path_size, c, length); + } + } + next_node = next_node->parent; + } + ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length); + + /* Reverse the path string */ + + if (length <= path_size) { + left = full_path; + right = full_path + length - 1; + while (left < right) { + c = *left; + *left++ = *right; + *right-- = c; + } + } + + /* Append the trailing null */ + +build_trailing_null: + ACPI_PATH_PUT8(full_path, path_size, '\0', length); + +#undef ACPI_PATH_PUT8 + + return_UINT32(length); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_normalized_pathname + * + * PARAMETERS: node - Namespace node whose pathname is needed + * no_trailing - Remove trailing '_' from each name segment + * + * RETURN: Pointer to storage containing the fully qualified name of + * the node, In external format (name segments separated by path + * separators.) + * + * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually + * for error and debug statements. All trailing '_' will be + * removed from the full pathname if 'NoTrailing' is specified.. 
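The inner loop above walks each four-byte name segment in reverse and suppresses the trailing '_' padding until it sees the first real character. Stated forward, the per-segment rule is small; the following standalone sketch (a hypothetical helper, not ACPICA code) shows the same trimming:

#include <stdio.h>

/* Simplified, forward-order restatement of the trimming rule in
 * the loop above: drop only the trailing '_' padding of a 4-char
 * name segment; embedded underscores survive. */
static void emit_segment(const char name[4], int no_trailing)
{
	int end = 4;

	while (no_trailing && end > 0 && name[end - 1] == '_') {
		end--;
	}
	fwrite(name, 1, (size_t)end, stdout);
}

int main(void)
{
	emit_segment("SB__", 1);	/* prints "SB" */
	printf(".");
	emit_segment("PCI0", 1);	/* prints "PCI0" */
	printf("\n");			/* => "SB.PCI0" */
	return 0;
}

The real routine additionally assembles the whole path in reverse and flips it at the end, which is why its loop indexes name[4 - i - 1].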
+ * + ******************************************************************************/ + +char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, + u8 no_trailing) +{ + char *name_buffer; + acpi_size size; + + ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node); + + /* Calculate required buffer size based on depth below root */ + + size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); + if (!size) { + return_PTR(NULL); + } + + /* Allocate a buffer to be returned to caller */ + + name_buffer = ACPI_ALLOCATE_ZEROED(size); + if (!name_buffer) { + ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size)); + return_PTR(NULL); + } + + /* Build the path in the allocated buffer */ + + (void)acpi_ns_build_normalized_path(node, name_buffer, size, + no_trailing); + + return_PTR(name_buffer); +} diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 57a4cfe54..3736d43b1 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -70,7 +70,7 @@ acpi_ns_one_complete_parse(u32 pass_number, { union acpi_parse_object *parse_root; acpi_status status; - u32 aml_length; + u32 aml_length; u8 *aml_start; struct acpi_walk_state *walk_state; struct acpi_table_header *table; @@ -78,6 +78,20 @@ acpi_ns_one_complete_parse(u32 pass_number, ACPI_FUNCTION_TRACE(ns_one_complete_parse); + status = acpi_get_table_by_index(table_index, &table); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Table must consist of at least a complete header */ + + if (table->length < sizeof(struct acpi_table_header)) { + return_ACPI_STATUS(AE_BAD_HEADER); + } + + aml_start = (u8 *)table + sizeof(struct acpi_table_header); + aml_length = table->length - sizeof(struct acpi_table_header); + status = acpi_tb_get_owner_id(table_index, &owner_id); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); @@ -85,7 +99,7 @@ acpi_ns_one_complete_parse(u32 pass_number, /* Create and init a Root Node */ - parse_root = acpi_ps_create_scope_op(); + parse_root = acpi_ps_create_scope_op(aml_start); if (!parse_root) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -98,23 +112,12 @@ acpi_ns_one_complete_parse(u32 pass_number, return_ACPI_STATUS(AE_NO_MEMORY); } - status = acpi_get_table_by_index(table_index, &table); + status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, + aml_start, aml_length, NULL, + (u8)pass_number); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); - acpi_ps_free_op(parse_root); - return_ACPI_STATUS(status); - } - - /* Table must consist of at least a complete header */ - - if (table->length < sizeof(struct acpi_table_header)) { - status = AE_BAD_HEADER; - } else { - aml_start = (u8 *) table + sizeof(struct acpi_table_header); - aml_length = table->length - sizeof(struct acpi_table_header); - status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, - aml_start, aml_length, NULL, - (u8) pass_number); + goto cleanup; } /* Found OSDT table, enable the namespace override feature */ @@ -124,11 +127,6 @@ acpi_ns_one_complete_parse(u32 pass_number, walk_state->namespace_override = TRUE; } - if (ACPI_FAILURE(status)) { - acpi_ds_delete_walk_state(walk_state); - goto cleanup; - } - /* start_node is the default location to load the table */ if (start_node && start_node != acpi_gbl_root_node) { diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index 8d8104b8b..de325ae04 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -83,7 +83,7 @@ 
acpi_ns_print_node_pathname(struct acpi_namespace_node *node, buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_ns_handle_to_pathname(node, &buffer); + status = acpi_ns_handle_to_pathname(node, &buffer, TRUE); if (ACPI_SUCCESS(status)) { if (message) { acpi_os_printf("%s ", message); @@ -596,6 +596,23 @@ void acpi_ns_terminate(void) ACPI_FUNCTION_TRACE(ns_terminate); +#ifdef ACPI_EXEC_APP + { + union acpi_operand_object *prev; + union acpi_operand_object *next; + + /* Delete any module-level code blocks */ + + next = acpi_gbl_module_code_list; + while (next) { + prev = next; + next = next->method.mutex; + prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ + acpi_ut_remove_reference(prev); + } + } +#endif + /* * Free the entire namespace -- all nodes and all objects * attached to the nodes diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 9ff643b95..4b4d2f43d 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -172,11 +172,15 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) return (status); } - if (name_type == ACPI_FULL_PATHNAME) { + if (name_type == ACPI_FULL_PATHNAME || + name_type == ACPI_FULL_PATHNAME_NO_TRAILING) { /* Get the full pathname (From the namespace root) */ - status = acpi_ns_handle_to_pathname(handle, buffer); + status = acpi_ns_handle_to_pathname(handle, buffer, + name_type == + ACPI_FULL_PATHNAME ? FALSE : + TRUE); return (status); } diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 6d0387705..29d8b7b01 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -287,7 +287,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, "Control Method - %p Desc %p Path=%p\n", node, method_desc, path)); - name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); + name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start); if (!name_op) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -484,7 +484,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state, static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state) { - u32 aml_offset; + u8 *aml; union acpi_parse_object *field; union acpi_parse_object *arg = NULL; u16 opcode; @@ -498,8 +498,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state ACPI_FUNCTION_TRACE(ps_get_next_field); - aml_offset = - (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start); + aml = parser_state->aml; /* Determine field type */ @@ -536,13 +535,11 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Allocate a new field op */ - field = acpi_ps_alloc_op(opcode); + field = acpi_ps_alloc_op(opcode, aml); if (!field) { return_PTR(NULL); } - field->common.aml_offset = aml_offset; - /* Decode the field type */ switch (opcode) { @@ -604,6 +601,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state * Argument for Connection operator can be either a Buffer * (resource descriptor), or a name_string. 
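The ACPI_FULL_PATHNAME_NO_TRAILING name type introduced in the nsxfname.c hunk above is the public face of the no_trailing plumbing. A caller sketch, assuming ACPICA's usual ACPI_ALLOCATE_BUFFER convention and headers:

#include <acpi/acpi.h>

/* Sketch: fetch the trimmed pathname of a namespace object.
 * With ACPI_FULL_PATHNAME the same call would return the padded
 * form, e.g. "\_SB_.PCI0" instead of "\_SB.PCI0". */
acpi_status print_trimmed_path(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_get_name(handle, ACPI_FULL_PATHNAME_NO_TRAILING,
			       &buffer);
	if (ACPI_SUCCESS(status)) {
		acpi_os_printf("%s\n", (char *)buffer.pointer);
		ACPI_FREE(buffer.pointer);
	}
	return status;
}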
*/ + aml = parser_state->aml; if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { parser_state->aml++; @@ -616,7 +614,8 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Non-empty list */ - arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); + arg = + acpi_ps_alloc_op(AML_INT_BYTELIST_OP, aml); if (!arg) { acpi_ps_free_op(field); return_PTR(NULL); @@ -665,7 +664,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state parser_state->aml = pkg_end; } else { - arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); + arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, aml); if (!arg) { acpi_ps_free_op(field); return_PTR(NULL); @@ -730,7 +729,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, /* Constants, strings, and namestrings are all the same size */ - arg = acpi_ps_alloc_op(AML_BYTE_OP); + arg = acpi_ps_alloc_op(AML_BYTE_OP, parser_state->aml); if (!arg) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -777,7 +776,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, /* Non-empty list */ - arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); + arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP, + parser_state->aml); if (!arg) { return_ACPI_STATUS(AE_NO_MEMORY); } @@ -807,7 +807,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, /* null_name or name_string */ - arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); + arg = + acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, + parser_state->aml); if (!arg) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 90437227d..03ac8c9a6 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -51,6 +51,7 @@ #include #include "accommon.h" +#include "acinterp.h" #include "acparser.h" #include "acdispat.h" #include "amlcode.h" @@ -125,10 +126,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, */ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) && !walk_state->arg_count) { - walk_state->aml_offset = - (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml, - walk_state->parser_state. 
- aml_start); + walk_state->aml = walk_state->parser_state.aml; status = acpi_ps_get_next_arg(walk_state, @@ -140,7 +138,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, } if (arg) { - arg->common.aml_offset = walk_state->aml_offset; acpi_ps_append_arg(op, arg); } @@ -324,6 +321,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op, union acpi_operand_object *method_obj; struct acpi_namespace_node *parent_node; + ACPI_FUNCTION_TRACE(ps_link_module_code); + /* Get the tail of the list */ prev = next = acpi_gbl_module_code_list; @@ -343,9 +342,13 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op, method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); if (!method_obj) { - return; + return_VOID; } + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Create/Link new code block: %p\n", + method_obj)); + if (parent_op->common.node) { parent_node = parent_op->common.node; } else { @@ -370,8 +373,14 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op, prev->method.mutex = method_obj; } } else { + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Appending to existing code block: %p\n", + prev)); + prev->method.aml_length += aml_length; } + + return_VOID; } /******************************************************************************* @@ -494,16 +503,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) continue; } - op->common.aml_offset = walk_state->aml_offset; - - if (walk_state->op_info) { - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n", - (u32) op->common.aml_opcode, - walk_state->op_info->name, op, - parser_state->aml, - op->common.aml_offset)); - } + acpi_ex_start_trace_opcode(op, walk_state); } /* diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 2f5ddd806..e54bc2aa7 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -66,12 +66,11 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state); static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) { + u32 aml_offset; ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state); - walk_state->aml_offset = - (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml, - walk_state->parser_state.aml_start); + walk_state->aml = walk_state->parser_state.aml; walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state)); /* @@ -98,10 +97,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) /* The opcode is unrecognized. Complain and skip unknown opcodes */ if (walk_state->pass_number == 2) { + aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml, + walk_state-> + parser_state.aml_start); + ACPI_ERROR((AE_INFO, "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring", walk_state->opcode, - (u32)(walk_state->aml_offset + + (u32)(aml_offset + sizeof(struct acpi_table_header)))); ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16), @@ -115,14 +118,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) acpi_os_printf ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n", walk_state->opcode, - (u32)(walk_state->aml_offset + + (u32)(aml_offset + sizeof(struct acpi_table_header))); /* Dump the context surrounding the invalid opcode */ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. 
aml - 16), 48, DB_BYTE_DISPLAY, - (walk_state->aml_offset + + (aml_offset + sizeof(struct acpi_table_header) - 16)); acpi_os_printf(" */\n"); @@ -294,7 +297,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, /* Create Op structure and append to parent's argument list */ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode); - op = acpi_ps_alloc_op(walk_state->opcode); + op = acpi_ps_alloc_op(walk_state->opcode, aml_op_start); if (!op) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index a555f7f7b..98001d7f6 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -147,6 +147,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, return_ACPI_STATUS(AE_OK); /* OK for now */ } + acpi_ex_stop_trace_opcode(op, walk_state); + /* Delete this op and the subtree below it if asked to */ if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) != @@ -185,7 +187,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, * op must be replaced by a placeholder return op */ replacement_op = - acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); + acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP, + op->common.aml); if (!replacement_op) { status = AE_NO_MEMORY; } @@ -209,7 +212,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) { replacement_op = - acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); + acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP, + op->common.aml); if (!replacement_op) { status = AE_NO_MEMORY; } @@ -224,7 +228,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, AML_VAR_PACKAGE_OP)) { replacement_op = acpi_ps_alloc_op(op->common. - aml_opcode); + aml_opcode, + op->common.aml); if (!replacement_op) { status = AE_NO_MEMORY; } else { @@ -240,7 +245,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state, default: replacement_op = - acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); + acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP, + op->common.aml); if (!replacement_op) { status = AE_NO_MEMORY; } diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 324409120..183cc1efb 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -60,11 +60,11 @@ ACPI_MODULE_NAME("psutils") * DESCRIPTION: Create a Scope and associated namepath op with the root name * ******************************************************************************/ -union acpi_parse_object *acpi_ps_create_scope_op(void) +union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml) { union acpi_parse_object *scope_op; - scope_op = acpi_ps_alloc_op(AML_SCOPE_OP); + scope_op = acpi_ps_alloc_op(AML_SCOPE_OP, aml); if (!scope_op) { return (NULL); } @@ -103,6 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode) * FUNCTION: acpi_ps_alloc_op * * PARAMETERS: opcode - Opcode that will be stored in the new Op + * aml - Address of the opcode * * RETURN: Pointer to the new Op, null on failure * @@ -112,7 +113,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode) * ******************************************************************************/ -union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) +union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml) { union acpi_parse_object *op; const struct acpi_opcode_info *op_info; @@ -149,6 +150,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) if (op) { acpi_ps_init_op(op, opcode); + op->common.aml = aml; op->common.flags = 
flags; } diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index 841a5ea06..4254805dd 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -47,15 +47,12 @@ #include "acdispat.h" #include "acinterp.h" #include "actables.h" +#include "acnamesp.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psxface") /* Local Prototypes */ -static void acpi_ps_start_trace(struct acpi_evaluate_info *info); - -static void acpi_ps_stop_trace(struct acpi_evaluate_info *info); - static void acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action); @@ -76,7 +73,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action); ******************************************************************************/ acpi_status -acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags) +acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags) { acpi_status status; @@ -85,108 +82,14 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags) return (status); } - /* TBDs: Validate name, allow full path or just nameseg */ - - acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name); + acpi_gbl_trace_method_name = name; acpi_gbl_trace_flags = flags; - - if (debug_level) { - acpi_gbl_trace_dbg_level = debug_level; - } - if (debug_layer) { - acpi_gbl_trace_dbg_layer = debug_layer; - } + acpi_gbl_trace_dbg_level = debug_level; + acpi_gbl_trace_dbg_layer = debug_layer; + status = AE_OK; (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - return (AE_OK); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ps_start_trace - * - * PARAMETERS: info - Method info struct - * - * RETURN: None - * - * DESCRIPTION: Start control method execution trace - * - ******************************************************************************/ - -static void acpi_ps_start_trace(struct acpi_evaluate_info *info) -{ - acpi_status status; - - ACPI_FUNCTION_ENTRY(); - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return; - } - - if ((!acpi_gbl_trace_method_name) || - (acpi_gbl_trace_method_name != info->node->name.integer)) { - goto exit; - } - - acpi_gbl_original_dbg_level = acpi_dbg_level; - acpi_gbl_original_dbg_layer = acpi_dbg_layer; - - acpi_dbg_level = 0x00FFFFFF; - acpi_dbg_layer = ACPI_UINT32_MAX; - - if (acpi_gbl_trace_dbg_level) { - acpi_dbg_level = acpi_gbl_trace_dbg_level; - } - if (acpi_gbl_trace_dbg_layer) { - acpi_dbg_layer = acpi_gbl_trace_dbg_layer; - } - -exit: - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ps_stop_trace - * - * PARAMETERS: info - Method info struct - * - * RETURN: None - * - * DESCRIPTION: Stop control method execution trace - * - ******************************************************************************/ - -static void acpi_ps_stop_trace(struct acpi_evaluate_info *info) -{ - acpi_status status; - - ACPI_FUNCTION_ENTRY(); - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return; - } - - if ((!acpi_gbl_trace_method_name) || - (acpi_gbl_trace_method_name != info->node->name.integer)) { - goto exit; - } - - /* Disable further tracing if type is one-shot */ - - if (acpi_gbl_trace_flags & 1) { - acpi_gbl_trace_method_name = 0; - acpi_gbl_trace_dbg_level = 0; - acpi_gbl_trace_dbg_layer = 0; - } - - acpi_dbg_level = 
acpi_gbl_original_dbg_level; - acpi_dbg_layer = acpi_gbl_original_dbg_layer; - -exit: - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return (status); } /******************************************************************************* @@ -212,7 +115,7 @@ exit: * ******************************************************************************/ -acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) +acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info) { acpi_status status; union acpi_parse_object *op; @@ -243,10 +146,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) */ acpi_ps_update_parameter_list(info, REF_INCREMENT); - /* Begin tracing if requested */ - - acpi_ps_start_trace(info); - /* * Execute the method. Performs parse simultaneously */ @@ -256,7 +155,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) /* Create and init a Root Node */ - op = acpi_ps_create_scope_op(); + op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start); if (!op) { status = AE_NO_MEMORY; goto cleanup; @@ -326,10 +225,6 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) cleanup: acpi_ps_delete_parse_tree(op); - /* End optional tracing */ - - acpi_ps_stop_trace(info); - /* Take away the extra reference that we gave the parameters above */ acpi_ps_update_parameter_list(info, REF_DECREMENT); diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 3fa829e96..a5344428f 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -348,7 +348,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, status = acpi_ns_handle_to_pathname((acpi_handle) node, - &path_buffer); + &path_buffer, + FALSE); /* +1 to include null terminator */ diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 6253001b6..a6454f4a6 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * * FUNCTION: acpi_tb_parse_fadt * - * PARAMETERS: table_index - Index for the FADT + * PARAMETERS: None * * RETURN: None * @@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * ******************************************************************************/ -void acpi_tb_parse_fadt(u32 table_index) +void acpi_tb_parse_fadt(void) { u32 length; struct acpi_table_header *table; @@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index) * Get a local copy of the FADT and convert it to a common format * Map entire FADT, assumed to be smaller than one page. */ - length = acpi_gbl_root_table_list.tables[table_index].length; + length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length; table = - acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. - address, length); + acpi_os_map_memory(acpi_gbl_root_table_list. 
+ tables[acpi_gbl_fadt_index].address, length); if (!table) { return; } @@ -345,7 +345,7 @@ void acpi_tb_parse_fadt(u32 table_index) /* Obtain the DSDT and FACS tables via their addresses within the FADT */ acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, - ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); + ACPI_SIG_DSDT, &acpi_gbl_dsdt_index); /* If Hardware Reduced flag is set, there is no FACS */ @@ -354,13 +354,13 @@ void acpi_tb_parse_fadt(u32 table_index) acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.facs, ACPI_SIG_FACS, - ACPI_TABLE_INDEX_FACS); + &acpi_gbl_facs_index); } if (acpi_gbl_FADT.Xfacs) { acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS, - ACPI_TABLE_INDEX_X_FACS); + &acpi_gbl_xfacs_index); } } } diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index 119c84ad9..405529d49 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -68,12 +68,25 @@ acpi_status acpi_tb_find_table(char *signature, char *oem_id, char *oem_table_id, u32 *table_index) { - u32 i; acpi_status status; struct acpi_table_header header; + u32 i; ACPI_FUNCTION_TRACE(tb_find_table); + /* Validate the input table signature */ + + if (!acpi_is_valid_signature(signature)) { + return_ACPI_STATUS(AE_BAD_SIGNATURE); + } + + /* Don't allow the OEM strings to be too long */ + + if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) || + (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) { + return_ACPI_STATUS(AE_AML_STRING_LIMIT); + } + /* Normalize the input strings */ memset(&header, 0, sizeof(struct acpi_table_header)); diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 15ea98e00..6319b4242 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -100,9 +100,9 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) * * FUNCTION: acpi_tb_install_table_with_override * - * PARAMETERS: table_index - Index into root table array - * new_table_desc - New table descriptor to install + * PARAMETERS: new_table_desc - New table descriptor to install * override - Whether override should be performed + * table_index - Where the table index is returned * * RETURN: None * @@ -114,12 +114,14 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) ******************************************************************************/ void -acpi_tb_install_table_with_override(u32 table_index, - struct acpi_table_desc *new_table_desc, - u8 override) +acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, + u8 override, u32 *table_index) { + u32 i; + acpi_status status; - if (table_index >= acpi_gbl_root_table_list.current_table_count) { + status = acpi_tb_get_next_table_descriptor(&i, NULL); + if (ACPI_FAILURE(status)) { return; } @@ -134,8 +136,7 @@ acpi_tb_install_table_with_override(u32 table_index, acpi_tb_override_table(new_table_desc); } - acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. 
- tables[table_index], + acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i], new_table_desc->address, new_table_desc->flags, new_table_desc->pointer); @@ -143,9 +144,13 @@ acpi_tb_install_table_with_override(u32 table_index, acpi_tb_print_table_header(new_table_desc->address, new_table_desc->pointer); + /* This synchronizes acpi_gbl_dsdt_index */ + + *table_index = i; + /* Set the global integer width (based upon revision of the DSDT) */ - if (table_index == ACPI_TABLE_INDEX_DSDT) { + if (i == acpi_gbl_dsdt_index) { acpi_ut_set_integer_width(new_table_desc->pointer->revision); } } @@ -157,7 +162,7 @@ acpi_tb_install_table_with_override(u32 table_index, * PARAMETERS: address - Physical address of DSDT or FACS * signature - Table signature, NULL if no need to * match - * table_index - Index into root table array + * table_index - Where the table index is returned * * RETURN: Status * @@ -168,7 +173,7 @@ acpi_tb_install_table_with_override(u32 table_index, acpi_status acpi_tb_install_fixed_table(acpi_physical_address address, - char *signature, u32 table_index) + char *signature, u32 *table_index) { struct acpi_table_desc new_table_desc; acpi_status status; @@ -200,7 +205,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address, goto release_and_exit; } - acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE); + /* Add the table to the global root table list */ + + acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index); release_and_exit: @@ -355,13 +362,8 @@ acpi_tb_install_standard_table(acpi_physical_address address, /* Add the table to the global root table list */ - status = acpi_tb_get_next_table_descriptor(&i, NULL); - if (ACPI_FAILURE(status)) { - goto release_and_exit; - } - - *table_index = i; - acpi_tb_install_table_with_override(i, &new_table_desc, override); + acpi_tb_install_table_with_override(&new_table_desc, override, + table_index); release_and_exit: diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 568ac0e4a..d8ddef38c 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -68,28 +68,27 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); acpi_status acpi_tb_initialize_facs(void) { + struct acpi_table_facs *facs; /* If Hardware Reduced flag is set, there is no FACS */ if (acpi_gbl_reduced_hardware) { acpi_gbl_FACS = NULL; return (AE_OK); - } - - (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, - ACPI_CAST_INDIRECT_PTR(struct - acpi_table_header, - &acpi_gbl_facs32)); - (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS, - ACPI_CAST_INDIRECT_PTR(struct - acpi_table_header, - &acpi_gbl_facs64)); - - if (acpi_gbl_facs64 - && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) { - acpi_gbl_FACS = acpi_gbl_facs64; - } else if (acpi_gbl_facs32) { - acpi_gbl_FACS = acpi_gbl_facs32; + } else if (acpi_gbl_FADT.Xfacs && + (!acpi_gbl_FADT.facs + || !acpi_gbl_use32_bit_facs_addresses)) { + (void)acpi_get_table_by_index(acpi_gbl_xfacs_index, + ACPI_CAST_INDIRECT_PTR(struct + acpi_table_header, + &facs)); + acpi_gbl_FACS = facs; + } else if (acpi_gbl_FADT.facs) { + (void)acpi_get_table_by_index(acpi_gbl_facs_index, + ACPI_CAST_INDIRECT_PTR(struct + acpi_table_header, + &facs)); + acpi_gbl_FACS = facs; } /* If there is no FACS, just continue. 
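The reworked acpi_tb_initialize_facs() above reduces to a single selection rule between the FADT's 32-bit facs and 64-bit Xfacs addresses. A standalone restatement of just that predicate (a hypothetical helper, plain C types):

#include <stdint.h>

/* Use the 64-bit X_FACS address when present, unless a 32-bit
 * address also exists and the host explicitly prefers 32-bit
 * FACS addresses (acpi_gbl_use32_bit_facs_addresses above). */
static int use_xfacs(uint64_t xfacs, uint32_t facs,
		     int use32_bit_facs_addresses)
{
	return xfacs && (!facs || !use32_bit_facs_addresses);
}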
There was already an error msg */ @@ -98,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void) } #endif /* !ACPI_REDUCED_HARDWARE */ -/******************************************************************************* - * - * FUNCTION: acpi_tb_tables_loaded - * - * PARAMETERS: None - * - * RETURN: TRUE if required ACPI tables are loaded - * - * DESCRIPTION: Determine if the minimum required ACPI tables are present - * (FADT, FACS, DSDT) - * - ******************************************************************************/ - -u8 acpi_tb_tables_loaded(void) -{ - - if (acpi_gbl_root_table_list.current_table_count >= 4) { - return (TRUE); - } - - return (FALSE); -} - /******************************************************************************* * * FUNCTION: acpi_tb_check_dsdt_header @@ -192,7 +168,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index) acpi_tb_uninstall_table(table_desc); acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. - tables[ACPI_TABLE_INDEX_DSDT], + tables[acpi_gbl_dsdt_index], ACPI_PTR_TO_PHYSADDR(new_table), ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, new_table); @@ -369,13 +345,6 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) table_entry_size); table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header)); - /* - * First three entries in the table array are reserved for the DSDT - * and 32bit/64bit FACS, which are not actually present in the - * RSDT/XSDT - they come from the FADT - */ - acpi_gbl_root_table_list.current_table_count = 3; - /* Initialize the root table array from the RSDT/XSDT */ for (i = 0; i < table_count; i++) { @@ -400,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. tables[table_index].signature, ACPI_SIG_FADT)) { - acpi_tb_parse_fadt(table_index); + acpi_gbl_fadt_index = table_index; + acpi_tb_parse_fadt(); } next_table: @@ -412,3 +382,36 @@ next_table: return_ACPI_STATUS(AE_OK); } + +/******************************************************************************* + * + * FUNCTION: acpi_is_valid_signature + * + * PARAMETERS: signature - Sig string to be validated + * + * RETURN: TRUE if signature is correct length and has valid characters + * + * DESCRIPTION: Validate an ACPI table signature. 
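acpi_is_valid_signature() below leans on acpi_ut_valid_acpi_char() for the per-character test. As a rough standalone approximation, a valid signature is exactly four characters (ACPI_NAME_SIZE) drawn from the AML name-segment alphabet; the real per-character check may admit additional special cases:

#include <string.h>

/* Approximation of the check below: four characters from
 * [A-Z0-9_]. E.g. "SSDT" passes; "ssdt" and "DSD" do not. */
static int is_valid_signature(const char *signature)
{
	size_t i;

	if (strlen(signature) != 4) {
		return 0;
	}
	for (i = 0; i < 4; i++) {
		char c = signature[i];

		if (!((c >= 'A' && c <= 'Z') ||
		      (c >= '0' && c <= '9') || c == '_')) {
			return 0;
		}
	}
	return 1;
}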
+ * + ******************************************************************************/ + +u8 acpi_is_valid_signature(char *signature) +{ + u32 i; + + /* Validate the signature length */ + + if (strlen(signature) != ACPI_NAME_SIZE) { + return (FALSE); + } + + /* Validate each character in the signature */ + + for (i = 0; i < ACPI_NAME_SIZE; i++) { + if (!acpi_ut_valid_acpi_char(signature[i], i)) { + return (FALSE); + } + } + + return (TRUE); +} diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 9682d40ca..55ee14ca9 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -51,9 +51,6 @@ #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbxfload") -/* Local prototypes */ -static acpi_status acpi_tb_load_namespace(void); - /******************************************************************************* * * FUNCTION: acpi_load_tables @@ -65,7 +62,6 @@ static acpi_status acpi_tb_load_namespace(void); * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT * ******************************************************************************/ - acpi_status __init acpi_load_tables(void) { acpi_status status; @@ -75,6 +71,13 @@ acpi_status __init acpi_load_tables(void) /* Load the namespace from the tables */ status = acpi_tb_load_namespace(); + + /* Don't let single failures abort the load */ + + if (status == AE_CTRL_TERMINATE) { + status = AE_OK; + } + if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While loading namespace from ACPI tables")); @@ -97,11 +100,14 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables) * the RSDT/XSDT. * ******************************************************************************/ -static acpi_status acpi_tb_load_namespace(void) +acpi_status acpi_tb_load_namespace(void) { acpi_status status; u32 i; struct acpi_table_header *new_dsdt; + struct acpi_table_desc *table; + u32 tables_loaded = 0; + u32 tables_failed = 0; ACPI_FUNCTION_TRACE(tb_load_namespace); @@ -111,15 +117,11 @@ static acpi_status acpi_tb_load_namespace(void) * Load the namespace. The DSDT is required, but any SSDT and * PSDT tables are optional. Verify the DSDT. */ + table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index]; + if (!acpi_gbl_root_table_list.current_table_count || - !ACPI_COMPARE_NAME(& - (acpi_gbl_root_table_list. - tables[ACPI_TABLE_INDEX_DSDT].signature), - ACPI_SIG_DSDT) - || - ACPI_FAILURE(acpi_tb_validate_table - (&acpi_gbl_root_table_list. - tables[ACPI_TABLE_INDEX_DSDT]))) { + !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) || + ACPI_FAILURE(acpi_tb_validate_table(table))) { status = AE_NO_ACPI_TABLES; goto unlock_and_exit; } @@ -130,8 +132,7 @@ static acpi_status acpi_tb_load_namespace(void) * array can change dynamically as tables are loaded at run-time. Note: * .Pointer field is not validated until after call to acpi_tb_validate_table. */ - acpi_gbl_DSDT = - acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer; + acpi_gbl_DSDT = table->pointer; /* * Optionally copy the entire DSDT to local memory (instead of simply @@ -140,7 +141,7 @@ static acpi_status acpi_tb_load_namespace(void) * the DSDT. 
*/ if (acpi_gbl_copy_dsdt_locally) { - new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT); + new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index); if (new_dsdt) { acpi_gbl_DSDT = new_dsdt; } @@ -157,41 +158,65 @@ static acpi_status acpi_tb_load_namespace(void) /* Load and parse tables */ - status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node); + status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed")); + tables_failed++; + } else { + tables_loaded++; } /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { + table = &acpi_gbl_root_table_list.tables[i]; + if (!acpi_gbl_root_table_list.tables[i].address || - (!ACPI_COMPARE_NAME - (&(acpi_gbl_root_table_list.tables[i].signature), - ACPI_SIG_SSDT) - && - !ACPI_COMPARE_NAME(& - (acpi_gbl_root_table_list.tables[i]. - signature), ACPI_SIG_PSDT) - && - !ACPI_COMPARE_NAME(& - (acpi_gbl_root_table_list.tables[i]. - signature), ACPI_SIG_OSDT)) - || - ACPI_FAILURE(acpi_tb_validate_table - (&acpi_gbl_root_table_list.tables[i]))) { + (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT) + && !ACPI_COMPARE_NAME(table->signature.ascii, + ACPI_SIG_PSDT) + && !ACPI_COMPARE_NAME(table->signature.ascii, + ACPI_SIG_OSDT)) + || ACPI_FAILURE(acpi_tb_validate_table(table))) { continue; } /* Ignore errors while loading tables, get as many as possible */ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); - (void)acpi_ns_load_table(i, acpi_gbl_root_node); + status = acpi_ns_load_table(i, acpi_gbl_root_node); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "(%4.4s:%8.8s) while loading table", + table->signature.ascii, + table->pointer->oem_table_id)); + tables_failed++; + + ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, + "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n", + table->signature.ascii, + table->pointer->oem_table_id)); + } else { + tables_loaded++; + } + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); } - ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired")); + if (!tables_failed) { + ACPI_INFO((AE_INFO, + "%u ACPI AML tables successfully acquired and loaded", + tables_loaded)); + } else { + ACPI_ERROR((AE_INFO, + "%u table load failures, %u successful", + tables_failed, tables_loaded)); + + /* Indicate at least one failure */ + + status = AE_CTRL_TERMINATE; + } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index cd0269384..414622910 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -45,6 +45,7 @@ #include #include "accommon.h" +#include "acinterp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utdebug") @@ -560,8 +561,37 @@ acpi_ut_ptr_exit(u32 line_number, } } +/******************************************************************************* + * + * FUNCTION: acpi_trace_point + * + * PARAMETERS: type - Trace event type + * begin - TRUE if before execution + * aml - Executed AML address + * pathname - Object path + * pointer - Pointer to the related object + * + * RETURN: None + * + * DESCRIPTION: Interpreter execution trace. 
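acpi_trace_point() below forwards every event to acpi_ex_trace_point() and, when ACPI_USE_SYSTEM_TRACER is defined, to the host through acpi_os_trace_point(). Its parameters mirror acpi_trace_point() itself, so a minimal host-side consumer could look like this (a sketch, not the kernel's implementation):

/* Sketch of a host-side consumer for the ACPI_USE_SYSTEM_TRACER
 * hook invoked below; 'begin' is TRUE on entry events. */
void acpi_os_trace_point(acpi_trace_event_type type, u8 begin,
			 u8 *aml, char *pathname)
{
	acpi_os_printf("[acpi-trace] type %u %s %s (aml %p)\n",
		       (u32)type, begin ? "begin" : "end",
		       pathname ? pathname : "<unnamed>", aml);
}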
+ * + ******************************************************************************/ + +void +acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname) +{ + + ACPI_FUNCTION_ENTRY(); + + acpi_ex_trace_point(type, begin, aml, pathname); + +#ifdef ACPI_USE_SYSTEM_TRACER + acpi_os_trace_point(type, begin, aml, pathname); #endif +} +ACPI_EXPORT_SYMBOL(acpi_trace_point) +#endif #ifdef ACPI_APPLICATION /******************************************************************************* * @@ -575,7 +605,6 @@ acpi_ut_ptr_exit(u32 line_number, * DESCRIPTION: Print error message to the console, used by applications. * ******************************************************************************/ - void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...) { va_list args; diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 71fce389f..1638312e3 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -209,6 +209,9 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) acpi_ut_delete_object_desc(object->method.mutex); object->method.mutex = NULL; } + if (object->method.node) { + object->method.node = NULL; + } break; case ACPI_TYPE_REGION: diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c index 857af8243..75a94f52b 100644 --- a/drivers/acpi/acpica/utfileio.c +++ b/drivers/acpi/acpica/utfileio.c @@ -312,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table) /* Get the entire file */ fprintf(stderr, - "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n", + "Reading ACPI table from file %12s - Length %.8u (0x%06X)\n", filename, file_size, file_size); status = acpi_ut_read_table(file, table, &table_length); diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index e402e07b4..28ab3a1d5 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -204,11 +204,10 @@ acpi_status acpi_ut_init_globals(void) acpi_gbl_acpi_hardware_present = TRUE; acpi_gbl_last_owner_id_index = 0; acpi_gbl_next_owner_id_offset = 0; - acpi_gbl_trace_dbg_level = 0; - acpi_gbl_trace_dbg_layer = 0; acpi_gbl_debugger_configuration = DEBUGGER_THREADING; acpi_gbl_osi_mutex = NULL; acpi_gbl_reg_methods_executed = FALSE; + acpi_gbl_max_loop_iterations = 0xFFFF; /* Hardware oriented */ diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 71b66537f..bd4443bdc 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -75,7 +75,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id) return (FALSE); } -#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP) +#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP) /******************************************************************************* * * FUNCTION: acpi_ut_is_aml_table @@ -376,7 +376,7 @@ acpi_ut_display_init_pathname(u8 type, /* Get the full pathname to the node */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_ns_handle_to_pathname(obj_handle, &buffer); + status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE); if (ACPI_FAILURE(status)) { return; } diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c new file mode 100644 index 000000000..1d5f6b17b --- /dev/null +++ b/drivers/acpi/acpica/utnonansi.c @@ -0,0 +1,380 @@ +/******************************************************************************* + * + * Module Name: utnonansi - Non-ansi C library functions 
+ * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2015, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utnonansi") + +/* + * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit + * version of strtoul. 
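The functions collected here behave like their non-ANSI libc namesakes. A short usage sketch against the definitions that follow (assumes the ACPICA-internal headers, e.g. acutils.h, for the prototypes and the u64 type):

/* Sketch: typical use of the helpers defined below. */
static int utnonansi_examples(void)
{
	char id[] = "PNP0A03";
	u64 value;

	acpi_ut_strlwr(id);	/* id is now "pnp0a03" */
	acpi_ut_strupr(id);	/* id is "PNP0A03" again */

	/* ACPI_ANY_BASE auto-detects "0x" hex vs. decimal, as the
	 * AML ToInteger operator requires; value == 0x10 on AE_OK */
	(void)acpi_ut_strtoul64("0x10", ACPI_ANY_BASE, &value);

	/* stricmp() returns 0 on a case-insensitive match */
	return acpi_ut_stricmp(id, "pnp0a03") == 0;	/* 1 */
}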
+ */ +/******************************************************************************* + * + * FUNCTION: acpi_ut_strlwr (strlwr) + * + * PARAMETERS: src_string - The source string to convert + * + * RETURN: None + * + * DESCRIPTION: Convert a string to lowercase + * + ******************************************************************************/ +void acpi_ut_strlwr(char *src_string) +{ + char *string; + + ACPI_FUNCTION_ENTRY(); + + if (!src_string) { + return; + } + + /* Walk entire string, lowercasing the letters */ + + for (string = src_string; *string; string++) { + *string = (char)tolower((int)*string); + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strupr (strupr) + * + * PARAMETERS: src_string - The source string to convert + * + * RETURN: None + * + * DESCRIPTION: Convert a string to uppercase + * + ******************************************************************************/ + +void acpi_ut_strupr(char *src_string) +{ + char *string; + + ACPI_FUNCTION_ENTRY(); + + if (!src_string) { + return; + } + + /* Walk entire string, uppercasing the letters */ + + for (string = src_string; *string; string++) { + *string = (char)toupper((int)*string); + } +} + +/****************************************************************************** + * + * FUNCTION: acpi_ut_stricmp (stricmp) + * + * PARAMETERS: string1 - first string to compare + * string2 - second string to compare + * + * RETURN: int that signifies string relationship. Zero means strings + * are equal. + * + * DESCRIPTION: Case-insensitive string compare. Implementation of the + * non-ANSI stricmp function. + * + ******************************************************************************/ + +int acpi_ut_stricmp(char *string1, char *string2) +{ + int c1; + int c2; + + do { + c1 = tolower((int)*string1); + c2 = tolower((int)*string2); + + string1++; + string2++; + } + while ((c1 == c2) && (c1)); + + return (c1 - c2); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul64 + * + * PARAMETERS: string - Null terminated string + * base - Radix of the string: 16 or ACPI_ANY_BASE; + * ACPI_ANY_BASE means 'in behalf of to_integer' + * ret_integer - Where the converted integer is returned + * + * RETURN: Status and Converted value + * + * DESCRIPTION: Convert a string into an unsigned value. Performs either a + * 32-bit or 64-bit conversion, depending on the current mode + * of the interpreter. + * + * NOTE: Does not support Octal strings, not needed. + * + ******************************************************************************/ + +acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) +{ + u32 this_digit = 0; + u64 return_value = 0; + u64 quotient; + u64 dividend; + u32 to_integer_op = (base == ACPI_ANY_BASE); + u32 mode32 = (acpi_gbl_integer_byte_width == 4); + u8 valid_digits = 0; + u8 sign_of0x = 0; + u8 term = 0; + + ACPI_FUNCTION_TRACE_STR(ut_stroul64, string); + + switch (base) { + case ACPI_ANY_BASE: + case 16: + + break; + + default: + + /* Invalid Base */ + + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + if (!string) { + goto error_exit; + } + + /* Skip over any white space in the buffer */ + + while ((*string) && (isspace((int)*string) || *string == '\t')) { + string++; + } + + if (to_integer_op) { + /* + * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'. + * We need to determine if it is decimal or hexadecimal. 
+ */ + if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) { + sign_of0x = 1; + base = 16; + + /* Skip over the leading '0x' */ + string += 2; + } else { + base = 10; + } + } + + /* Any string left? Check that '0x' is not followed by white space. */ + + if (!(*string) || isspace((int)*string) || *string == '\t') { + if (to_integer_op) { + goto error_exit; + } else { + goto all_done; + } + } + + /* + * Perform a 32-bit or 64-bit conversion, depending upon the current + * execution mode of the interpreter + */ + dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX; + + /* Main loop: convert the string to a 32- or 64-bit integer */ + + while (*string) { + if (isdigit((int)*string)) { + + /* Convert ASCII 0-9 to Decimal value */ + + this_digit = ((u8)*string) - '0'; + } else if (base == 10) { + + /* Digit is out of range; possible in to_integer case only */ + + term = 1; + } else { + this_digit = (u8)toupper((int)*string); + if (isxdigit((int)this_digit)) { + + /* Convert ASCII Hex char to value */ + + this_digit = this_digit - 'A' + 10; + } else { + term = 1; + } + } + + if (term) { + if (to_integer_op) { + goto error_exit; + } else { + break; + } + } else if ((valid_digits == 0) && (this_digit == 0) + && !sign_of0x) { + + /* Skip zeros */ + string++; + continue; + } + + valid_digits++; + + if (sign_of0x + && ((valid_digits > 16) + || ((valid_digits > 8) && mode32))) { + /* + * This is to_integer operation case. + * No any restrictions for string-to-integer conversion, + * see ACPI spec. + */ + goto error_exit; + } + + /* Divide the digit into the correct position */ + + (void)acpi_ut_short_divide((dividend - (u64)this_digit), + base, "ient, NULL); + + if (return_value > quotient) { + if (to_integer_op) { + goto error_exit; + } else { + break; + } + } + + return_value *= base; + return_value += this_digit; + string++; + } + + /* All done, normal exit */ + +all_done: + + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n", + ACPI_FORMAT_UINT64(return_value))); + + *ret_integer = return_value; + return_ACPI_STATUS(AE_OK); + +error_exit: + /* Base was set/validated above */ + + if (base == 10) { + return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT); + } else { + return_ACPI_STATUS(AE_BAD_HEX_CONSTANT); + } +} + +#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) +/******************************************************************************* + * + * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat + * + * PARAMETERS: Adds a "DestSize" parameter to each of the standard string + * functions. This is the size of the Destination buffer. + * + * RETURN: TRUE if the operation would overflow the destination buffer. + * + * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that + * the result of the operation will not overflow the output string + * buffer. + * + * NOTE: These functions are typically only helpful for processing + * user input and command lines. For most ACPICA code, the + * required buffer length is precisely calculated before buffer + * allocation, so the use of these functions is unnecessary. 
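Note the inverted return convention documented above: TRUE means the operation would overflow the destination, and in that case nothing is copied or appended. The definitions follow below; a usage sketch:

/* Sketch: building a pathname with the overflow-checking helpers
 * defined below. TRUE means "would not fit" - the opposite of
 * most success conventions, so check accordingly. */
static void build_path_example(void)
{
	char path[32];

	if (acpi_ut_safe_strcpy(path, sizeof(path), "\\_SB_")) {
		return;	/* would overflow; nothing was copied */
	}
	if (acpi_ut_safe_strcat(path, sizeof(path), ".PCI0")) {
		return;	/* would overflow; nothing was appended */
	}
	/* path == "\_SB_.PCI0" */
}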
+ * + ******************************************************************************/ + +u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source) +{ + + if (strlen(source) >= dest_size) { + return (TRUE); + } + + strcpy(dest, source); + return (FALSE); +} + +u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source) +{ + + if ((strlen(dest) + strlen(source)) >= dest_size) { + return (TRUE); + } + + strcat(dest, source); + return (FALSE); +} + +u8 +acpi_ut_safe_strncat(char *dest, + acpi_size dest_size, + char *source, acpi_size max_transfer_length) +{ + acpi_size actual_transfer_length; + + actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source)); + + if ((strlen(dest) + actual_transfer_length) >= dest_size) { + return (TRUE); + } + + strncat(dest, source, max_transfer_length); + return (FALSE); +} +#endif diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 8f3c883df..4ddd105d9 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c @@ -48,286 +48,6 @@ #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstring") -/* - * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit - * version of strtoul. - */ -#ifdef ACPI_ASL_COMPILER -/******************************************************************************* - * - * FUNCTION: acpi_ut_strlwr (strlwr) - * - * PARAMETERS: src_string - The source string to convert - * - * RETURN: None - * - * DESCRIPTION: Convert string to lowercase - * - * NOTE: This is not a POSIX function, so it appears here, not in utclib.c - * - ******************************************************************************/ -void acpi_ut_strlwr(char *src_string) -{ - char *string; - - ACPI_FUNCTION_ENTRY(); - - if (!src_string) { - return; - } - - /* Walk entire string, lowercasing the letters */ - - for (string = src_string; *string; string++) { - *string = (char)tolower((int)*string); - } - - return; -} - -/****************************************************************************** - * - * FUNCTION: acpi_ut_stricmp (stricmp) - * - * PARAMETERS: string1 - first string to compare - * string2 - second string to compare - * - * RETURN: int that signifies string relationship. Zero means strings - * are equal. 
- * - * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare - * strings with no case sensitivity) - * - ******************************************************************************/ - -int acpi_ut_stricmp(char *string1, char *string2) -{ - int c1; - int c2; - - do { - c1 = tolower((int)*string1); - c2 = tolower((int)*string2); - - string1++; - string2++; - } - while ((c1 == c2) && (c1)); - - return (c1 - c2); -} -#endif - -/******************************************************************************* - * - * FUNCTION: acpi_ut_strupr (strupr) - * - * PARAMETERS: src_string - The source string to convert - * - * RETURN: None - * - * DESCRIPTION: Convert string to uppercase - * - * NOTE: This is not a POSIX function, so it appears here, not in utclib.c - * - ******************************************************************************/ - -void acpi_ut_strupr(char *src_string) -{ - char *string; - - ACPI_FUNCTION_ENTRY(); - - if (!src_string) { - return; - } - - /* Walk entire string, uppercasing the letters */ - - for (string = src_string; *string; string++) { - *string = (char)toupper((int)*string); - } - - return; -} - -/******************************************************************************* - * - * FUNCTION: acpi_ut_strtoul64 - * - * PARAMETERS: string - Null terminated string - * base - Radix of the string: 16 or ACPI_ANY_BASE; - * ACPI_ANY_BASE means 'in behalf of to_integer' - * ret_integer - Where the converted integer is returned - * - * RETURN: Status and Converted value - * - * DESCRIPTION: Convert a string into an unsigned value. Performs either a - * 32-bit or 64-bit conversion, depending on the current mode - * of the interpreter. - * NOTE: Does not support Octal strings, not needed. - * - ******************************************************************************/ - -acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) -{ - u32 this_digit = 0; - u64 return_value = 0; - u64 quotient; - u64 dividend; - u32 to_integer_op = (base == ACPI_ANY_BASE); - u32 mode32 = (acpi_gbl_integer_byte_width == 4); - u8 valid_digits = 0; - u8 sign_of0x = 0; - u8 term = 0; - - ACPI_FUNCTION_TRACE_STR(ut_stroul64, string); - - switch (base) { - case ACPI_ANY_BASE: - case 16: - - break; - - default: - - /* Invalid Base */ - - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (!string) { - goto error_exit; - } - - /* Skip over any white space in the buffer */ - - while ((*string) && (isspace((int)*string) || *string == '\t')) { - string++; - } - - if (to_integer_op) { - /* - * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'. - * We need to determine if it is decimal or hexadecimal. - */ - if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) { - sign_of0x = 1; - base = 16; - - /* Skip over the leading '0x' */ - string += 2; - } else { - base = 10; - } - } - - /* Any string left? Check that '0x' is not followed by white space. */ - - if (!(*string) || isspace((int)*string) || *string == '\t') { - if (to_integer_op) { - goto error_exit; - } else { - goto all_done; - } - } - - /* - * Perform a 32-bit or 64-bit conversion, depending upon the current - * execution mode of the interpreter - */ - dividend = (mode32) ? 
ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
-	/* Main loop: convert the string to a 32- or 64-bit integer */
-
-	while (*string) {
-		if (isdigit((int)*string)) {
-
-			/* Convert ASCII 0-9 to Decimal value */
-
-			this_digit = ((u8)*string) - '0';
-		} else if (base == 10) {
-
-			/* Digit is out of range; possible in to_integer case only */
-
-			term = 1;
-		} else {
-			this_digit = (u8)toupper((int)*string);
-			if (isxdigit((int)this_digit)) {
-
-				/* Convert ASCII Hex char to value */
-
-				this_digit = this_digit - 'A' + 10;
-			} else {
-				term = 1;
-			}
-		}
-
-		if (term) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		} else if ((valid_digits == 0) && (this_digit == 0)
-			   && !sign_of0x) {
-
-			/* Skip zeros */
-			string++;
-			continue;
-		}
-
-		valid_digits++;
-
-		if (sign_of0x
-		    && ((valid_digits > 16)
-			|| ((valid_digits > 8) && mode32))) {
-			/*
-			 * This is to_integer operation case.
-			 * No any restrictions for string-to-integer conversion,
-			 * see ACPI spec.
-			 */
-			goto error_exit;
-		}
-
-		/* Divide the digit into the correct position */
-
-		(void)acpi_ut_short_divide((dividend - (u64)this_digit),
-					   base, &quotient, NULL);
-
-		if (return_value > quotient) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		}
-
-		return_value *= base;
-		return_value += this_digit;
-		string++;
-	}
-
-	/* All done, normal exit */
-
-all_done:
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
-			  ACPI_FORMAT_UINT64(return_value)));
-
-	*ret_integer = return_value;
-	return_ACPI_STATUS(AE_OK);
-
-error_exit:
-	/* Base was set/validated above */
-
-	if (base == 10) {
-		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
-	} else {
-		return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
-	}
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_print_string
@@ -342,7 +62,6 @@ error_exit:
  *              sequences.
  *
  ******************************************************************************/
-
 void acpi_ut_print_string(char *string, u16 max_length)
 {
 	u32 i;
@@ -584,64 +303,3 @@ void ut_convert_backslashes(char *pathname)
 	}
 }
 #endif
-
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
- *
- * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
- *              functions. This is the size of the Destination buffer.
- *
- * RETURN:      TRUE if the operation would overflow the destination buffer.
- *
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- *              the result of the operation will not overflow the output string
- *              buffer.
- *
- * NOTE:        These functions are typically only helpful for processing
- *              user input and command lines. For most ACPICA code, the
- *              required buffer length is precisely calculated before buffer
- *              allocation, so the use of these functions is unnecessary.
- * - ******************************************************************************/ - -u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source) -{ - - if (strlen(source) >= dest_size) { - return (TRUE); - } - - strcpy(dest, source); - return (FALSE); -} - -u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source) -{ - - if ((strlen(dest) + strlen(source)) >= dest_size) { - return (TRUE); - } - - strcat(dest, source); - return (FALSE); -} - -u8 -acpi_ut_safe_strncat(char *dest, - acpi_size dest_size, - char *source, acpi_size max_transfer_length) -{ - acpi_size actual_transfer_length; - - actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source)); - - if ((strlen(dest) + actual_transfer_length) >= dest_size) { - return (TRUE); - } - - strncat(dest, source, max_transfer_length); - return (FALSE); -} -#endif diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 51cf52d52..4f332815d 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -92,13 +92,6 @@ acpi_status __init acpi_terminate(void) acpi_ut_mutex_terminate(); -#ifdef ACPI_DEBUGGER - - /* Shut down the debugger */ - - acpi_db_terminate(); -#endif - /* Now we can shutdown the OS-dependent layer */ status = acpi_os_terminate(); @@ -517,7 +510,8 @@ acpi_decode_pld_buffer(u8 *in_buffer, /* Parameter validation */ - if (!in_buffer || !return_buffer || (length < 16)) { + if (!in_buffer || !return_buffer + || (length < ACPI_PLD_REV1_BUFFER_SIZE)) { return (AE_BAD_PARAMETER); } @@ -567,7 +561,7 @@ acpi_decode_pld_buffer(u8 *in_buffer, pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword); pld_info->order = ACPI_PLD_GET_ORDER(&dword); - if (length >= ACPI_PLD_BUFFER_SIZE) { + if (length >= ACPI_PLD_REV2_BUFFER_SIZE) { /* Fifth 32-bit DWord (Revision 2 of _PLD) */ diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index 42a32a66e..a7137ec28 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -124,17 +124,6 @@ acpi_status __init acpi_initialize_subsystem(void) return_ACPI_STATUS(status); } - /* If configured, initialize the AML debugger */ - -#ifdef ACPI_DEBUGGER - status = acpi_db_initialize(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "During Debugger initialization")); - return_ACPI_STATUS(status); - } -#endif - return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index a85ac07f3..a2c8d7adb 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -24,10 +24,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index a095d4f85..043188365 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -18,10 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 04ab5c9d3..6330f557a 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c @@ -17,10 +17,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 3670bbab5..6682c5daf 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -18,10 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 2bfd53cbf..23981ac1c 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -23,10 +23,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 06e9b411a..20b3fcf40 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -21,10 +21,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b3628cc01..b719ab309 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -18,10 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 2b5b5e421..c727e3972 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -20,10 +20,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 513e7230e..a212cefae 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -319,14 +315,10 @@ static void acpi_bus_osc_support(void) capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ -#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ - defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; -#endif - -#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) - capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; -#endif + if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; @@ -423,6 +415,406 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) acpi_evaluate_ost(handle, type, ost_code, NULL); } +static void acpi_device_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_device *device = data; + + device->driver->ops.notify(device, event); +} + +static void acpi_device_notify_fixed(void *data) +{ + struct acpi_device *device = data; + + /* Fixed hardware devices have no handles */ + acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); +} + +static u32 acpi_device_fixed_event(void *data) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); + return ACPI_INTERRUPT_HANDLED; +} + +static int acpi_device_install_notify_handler(struct acpi_device *device) +{ + acpi_status status; + + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) + status = + acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_device_fixed_event, + device); + else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) + status = + acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_device_fixed_event, + device); + else + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_device_notify, + device); + + if (ACPI_FAILURE(status)) + return -EINVAL; + return 0; +} + +static void acpi_device_remove_notify_handler(struct acpi_device *device) +{ + if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) + acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, + acpi_device_fixed_event); + else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) + acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, + acpi_device_fixed_event); + else + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_device_notify); +} + +/* -------------------------------------------------------------------------- + Device Matching + -------------------------------------------------------------------------- */ + +static 
struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev, + const struct device *dev) +{ + struct mutex *physical_node_lock = &adev->physical_node_lock; + + mutex_lock(physical_node_lock); + if (list_empty(&adev->physical_node_list)) { + adev = NULL; + } else { + const struct acpi_device_physical_node *node; + + node = list_first_entry(&adev->physical_node_list, + struct acpi_device_physical_node, node); + if (node->dev != dev) + adev = NULL; + } + mutex_unlock(physical_node_lock); + return adev; +} + +/** + * acpi_device_is_first_physical_node - Is given dev first physical node + * @adev: ACPI companion device + * @dev: Physical device to check + * + * Function checks if given @dev is the first physical devices attached to + * the ACPI companion device. This distinction is needed in some cases + * where the same companion device is shared between many physical devices. + * + * Note that the caller have to provide valid @adev pointer. + */ +bool acpi_device_is_first_physical_node(struct acpi_device *adev, + const struct device *dev) +{ + return !!acpi_primary_dev_companion(adev, dev); +} + +/* + * acpi_companion_match() - Can we match via ACPI companion device + * @dev: Device in question + * + * Check if the given device has an ACPI companion and if that companion has + * a valid list of PNP IDs, and if the device is the first (primary) physical + * device associated with it. Return the companion pointer if that's the case + * or NULL otherwise. + * + * If multiple physical devices are attached to a single ACPI companion, we need + * to be careful. The usage scenario for this kind of relationship is that all + * of the physical devices in question use resources provided by the ACPI + * companion. A typical case is an MFD device where all the sub-devices share + * the parent's ACPI companion. In such cases we can only allow the primary + * (first) physical device to be matched with the help of the companion's PNP + * IDs. + * + * Additional physical devices sharing the ACPI companion can still use + * resources available from it but they will be matched normally using functions + * provided by their bus types (and analogously for their modalias). + */ +struct acpi_device *acpi_companion_match(const struct device *dev) +{ + struct acpi_device *adev; + + adev = ACPI_COMPANION(dev); + if (!adev) + return NULL; + + if (list_empty(&adev->pnp.ids)) + return NULL; + + return acpi_primary_dev_companion(adev, dev); +} + +/** + * acpi_of_match_device - Match device object using the "compatible" property. + * @adev: ACPI device object to match. + * @of_match_table: List of device IDs to match against. + * + * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of + * identifiers and a _DSD object with the "compatible" property, use that + * property to match against the given list of identifiers. + */ +static bool acpi_of_match_device(struct acpi_device *adev, + const struct of_device_id *of_match_table) +{ + const union acpi_object *of_compatible, *obj; + int i, nval; + + if (!adev) + return false; + + of_compatible = adev->data.of_compatible; + if (!of_match_table || !of_compatible) + return false; + + if (of_compatible->type == ACPI_TYPE_PACKAGE) { + nval = of_compatible->package.count; + obj = of_compatible->package.elements; + } else { /* Must be ACPI_TYPE_STRING. 
*/ + nval = 1; + obj = of_compatible; + } + /* Now we can look for the driver DT compatible strings */ + for (i = 0; i < nval; i++, obj++) { + const struct of_device_id *id; + + for (id = of_match_table; id->compatible[0]; id++) + if (!strcasecmp(obj->string.pointer, id->compatible)) + return true; + } + + return false; +} + +static bool __acpi_match_device_cls(const struct acpi_device_id *id, + struct acpi_hardware_id *hwid) +{ + int i, msk, byte_shift; + char buf[3]; + + if (!id->cls) + return false; + + /* Apply class-code bitmask, before checking each class-code byte */ + for (i = 1; i <= 3; i++) { + byte_shift = 8 * (3 - i); + msk = (id->cls_msk >> byte_shift) & 0xFF; + if (!msk) + continue; + + sprintf(buf, "%02x", (id->cls >> byte_shift) & msk); + if (strncmp(buf, &hwid->id[(i - 1) * 2], 2)) + return false; + } + return true; +} + +static const struct acpi_device_id *__acpi_match_device( + struct acpi_device *device, + const struct acpi_device_id *ids, + const struct of_device_id *of_ids) +{ + const struct acpi_device_id *id; + struct acpi_hardware_id *hwid; + + /* + * If the device is not present, it is unnecessary to load device + * driver for it. + */ + if (!device || !device->status.present) + return NULL; + + list_for_each_entry(hwid, &device->pnp.ids, list) { + /* First, check the ACPI/PNP IDs provided by the caller. */ + for (id = ids; id->id[0] || id->cls; id++) { + if (id->id[0] && !strcmp((char *) id->id, hwid->id)) + return id; + else if (id->cls && __acpi_match_device_cls(id, hwid)) + return id; + } + + /* + * Next, check ACPI_DT_NAMESPACE_HID and try to match the + * "compatible" property if found. + * + * The id returned by the below is not valid, but the only + * caller passing non-NULL of_ids here is only interested in + * whether or not the return value is NULL. + */ + if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) + && acpi_of_match_device(device, of_ids)) + return id; + } + return NULL; +} + +/** + * acpi_match_device - Match a struct device against a given list of ACPI IDs + * @ids: Array of struct acpi_device_id object to match against. + * @dev: The device structure to match. + * + * Check if @dev has a valid ACPI handle and if there is a struct acpi_device + * object for that handle and use that object to match against a given list of + * device IDs. + * + * Return a pointer to the first matching ID on success or %NULL on failure. + */ +const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, + const struct device *dev) +{ + return __acpi_match_device(acpi_companion_match(dev), ids, NULL); +} +EXPORT_SYMBOL_GPL(acpi_match_device); + +int acpi_match_device_ids(struct acpi_device *device, + const struct acpi_device_id *ids) +{ + return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; +} +EXPORT_SYMBOL(acpi_match_device_ids); + +bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + if (!drv->acpi_match_table) + return acpi_of_match_device(ACPI_COMPANION(dev), + drv->of_match_table); + + return !!__acpi_match_device(acpi_companion_match(dev), + drv->acpi_match_table, drv->of_match_table); +} +EXPORT_SYMBOL_GPL(acpi_driver_match_device); + +/* -------------------------------------------------------------------------- + ACPI Driver Management + -------------------------------------------------------------------------- */ + +/** + * acpi_bus_register_driver - register a driver with the ACPI bus + * @driver: driver being registered + * + * Registers a driver with the ACPI bus. 
Searches the namespace for all + * devices that match the driver's criteria and binds. Returns zero for + * success or a negative error status for failure. + */ +int acpi_bus_register_driver(struct acpi_driver *driver) +{ + int ret; + + if (acpi_disabled) + return -ENODEV; + driver->drv.name = driver->name; + driver->drv.bus = &acpi_bus_type; + driver->drv.owner = driver->owner; + + ret = driver_register(&driver->drv); + return ret; +} + +EXPORT_SYMBOL(acpi_bus_register_driver); + +/** + * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus + * @driver: driver to unregister + * + * Unregisters a driver with the ACPI bus. Searches the namespace for all + * devices that match the driver's criteria and unbinds. + */ +void acpi_bus_unregister_driver(struct acpi_driver *driver) +{ + driver_unregister(&driver->drv); +} + +EXPORT_SYMBOL(acpi_bus_unregister_driver); + +/* -------------------------------------------------------------------------- + ACPI Bus operations + -------------------------------------------------------------------------- */ + +static int acpi_bus_match(struct device *dev, struct device_driver *drv) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_driver *acpi_drv = to_acpi_driver(drv); + + return acpi_dev->flags.match_driver + && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); +} + +static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + return __acpi_device_uevent_modalias(to_acpi_device(dev), env); +} + +static int acpi_device_probe(struct device *dev) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); + int ret; + + if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) + return -EINVAL; + + if (!acpi_drv->ops.add) + return -ENOSYS; + + ret = acpi_drv->ops.add(acpi_dev); + if (ret) + return ret; + + acpi_dev->driver = acpi_drv; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Driver [%s] successfully bound to device [%s]\n", + acpi_drv->name, acpi_dev->pnp.bus_id)); + + if (acpi_drv->ops.notify) { + ret = acpi_device_install_notify_handler(acpi_dev); + if (ret) { + if (acpi_drv->ops.remove) + acpi_drv->ops.remove(acpi_dev); + + acpi_dev->driver = NULL; + acpi_dev->driver_data = NULL; + return ret; + } + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", + acpi_drv->name, acpi_dev->pnp.bus_id)); + get_device(dev); + return 0; +} + +static int acpi_device_remove(struct device * dev) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_driver *acpi_drv = acpi_dev->driver; + + if (acpi_drv) { + if (acpi_drv->ops.notify) + acpi_device_remove_notify_handler(acpi_dev); + if (acpi_drv->ops.remove) + acpi_drv->ops.remove(acpi_dev); + } + acpi_dev->driver = NULL; + acpi_dev->driver_data = NULL; + + put_device(dev); + return 0; +} + +struct bus_type acpi_bus_type = { + .name = "acpi", + .match = acpi_bus_match, + .probe = acpi_device_probe, + .remove = acpi_device_remove, + .uevent = acpi_device_uevent, +}; + /* -------------------------------------------------------------------------- Initialization/Cleanup -------------------------------------------------------------------------- */ @@ -661,7 +1053,9 @@ static int __init acpi_bus_init(void) */ acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL); - return 0; + result = bus_register(&acpi_bus_type); + if (!result) + return 0; /* Mimic structured exception handling */ error1: diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 6d5d1832a..5c3b0918d 100644 
--- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c index 6c9ee68e4..d0918d421 100644 --- a/drivers/acpi/cm_sbs.c +++ b/drivers/acpi/cm_sbs.c @@ -11,10 +11,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index c8ead9f97..12c240903 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -20,10 +20,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 6b1919f6b..68bb305b9 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c @@ -7,6 +7,8 @@ #include #include +#include "internal.h" + #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("debugfs"); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 88dbbb115..4806b7f85 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -1123,6 +1119,14 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) if (dev->pm_domain) return -EEXIST; + /* + * Only attach the power domain to the first device if the + * companion is shared by multiple. This is to prevent doing power + * management twice. + */ + if (!acpi_device_is_first_physical_node(adev, dev)) + return -EBUSY; + acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func); dev->pm_domain = &acpi_general_pm_domain; if (power_on) { diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c new file mode 100644 index 000000000..4ab4582e5 --- /dev/null +++ b/drivers/acpi/device_sysfs.c @@ -0,0 +1,521 @@ +/* + * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias. + * + * Copyright (C) 2015, Intel Corp. + * Author: Mika Westerberg + * Author: Rafael J. 
Wysocki + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include + +#include "internal.h" + +/** + * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent + * @acpi_dev: ACPI device object. + * @modalias: Buffer to print into. + * @size: Size of the buffer. + * + * Creates hid/cid(s) string needed for modalias and uevent + * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: + * char *modalias: "acpi:IBM0001:ACPI0001" + * Return: 0: no _HID and no _CID + * -EINVAL: output error + * -ENOMEM: output is truncated +*/ +static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, + int size) +{ + int len; + int count; + struct acpi_hardware_id *id; + + /* + * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should + * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the + * device's list. + */ + count = 0; + list_for_each_entry(id, &acpi_dev->pnp.ids, list) + if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) + count++; + + if (!count) + return 0; + + len = snprintf(modalias, size, "acpi:"); + if (len <= 0) + return len; + + size -= len; + + list_for_each_entry(id, &acpi_dev->pnp.ids, list) { + if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) + continue; + + count = snprintf(&modalias[len], size, "%s:", id->id); + if (count < 0) + return -EINVAL; + + if (count >= size) + return -ENOMEM; + + len += count; + size -= count; + } + modalias[len] = '\0'; + return len; +} + +/** + * create_of_modalias - Creates DT compatible string for modalias and uevent + * @acpi_dev: ACPI device object. + * @modalias: Buffer to print into. + * @size: Size of the buffer. + * + * Expose DT compatible modalias as of:NnameTCcompatible. This function should + * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of + * ACPI/PNP IDs. + */ +static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, + int size) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + const union acpi_object *of_compatible, *obj; + int len, count; + int i, nval; + char *c; + + acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); + /* DT strings are all in lower case */ + for (c = buf.pointer; *c != '\0'; c++) + *c = tolower(*c); + + len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); + ACPI_FREE(buf.pointer); + + if (len <= 0) + return len; + + of_compatible = acpi_dev->data.of_compatible; + if (of_compatible->type == ACPI_TYPE_PACKAGE) { + nval = of_compatible->package.count; + obj = of_compatible->package.elements; + } else { /* Must be ACPI_TYPE_STRING. 
*/ + nval = 1; + obj = of_compatible; + } + for (i = 0; i < nval; i++, obj++) { + count = snprintf(&modalias[len], size, "C%s", + obj->string.pointer); + if (count < 0) + return -EINVAL; + + if (count >= size) + return -ENOMEM; + + len += count; + size -= count; + } + modalias[len] = '\0'; + return len; +} + +int __acpi_device_uevent_modalias(struct acpi_device *adev, + struct kobj_uevent_env *env) +{ + int len; + + if (!adev) + return -ENODEV; + + if (list_empty(&adev->pnp.ids)) + return 0; + + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + + env->buflen += len; + if (!adev->data.of_compatible) + return 0; + + if (len > 0 && add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + + len = create_of_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + + env->buflen += len; + + return 0; +} + +/** + * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. + * + * Create the uevent modalias field for ACPI-enumerated devices. + * + * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with + * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". + */ +int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) +{ + return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); +} +EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); + +static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) +{ + int len, count; + + if (!adev) + return -ENODEV; + + if (list_empty(&adev->pnp.ids)) + return 0; + + len = create_pnp_modalias(adev, buf, size - 1); + if (len < 0) { + return len; + } else if (len > 0) { + buf[len++] = '\n'; + size -= len; + } + if (!adev->data.of_compatible) + return len; + + count = create_of_modalias(adev, buf + len, size - 1); + if (count < 0) { + return count; + } else if (count > 0) { + len += count; + buf[len++] = '\n'; + } + + return len; +} + +/** + * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. + * + * Create the modalias sysfs attribute for ACPI-enumerated devices. + * + * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with + * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+	return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
+
+static ssize_t
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+	return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
+static ssize_t real_power_state_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *adev = to_acpi_device(dev);
+	int state;
+	int ret;
+
+	ret = acpi_device_get_power(adev, &state);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *adev = to_acpi_device(dev);
+
+	return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
+static ssize_t
+acpi_eject_store(struct device *d, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct acpi_device *acpi_device = to_acpi_device(d);
+	acpi_object_type not_used;
+	acpi_status status;
+
+	if (!count || buf[0] != '1')
+		return -EINVAL;
+
+	if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+	    && !acpi_device->driver)
+		return -ENODEV;
+
+	status = acpi_get_type(acpi_device->handle, &not_used);
+	if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+		return -ENODEV;
+
+	get_device(&acpi_device->dev);
+	status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
+	if (ACPI_SUCCESS(status))
+		return count;
+
+	put_device(&acpi_device->dev);
+	acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+			  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+	return status == AE_NO_MEMORY ?
-ENOMEM : -EAGAIN; +} + +static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); + +static ssize_t +acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + + return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); +} +static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); + +static ssize_t acpi_device_uid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + + return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); +} +static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); + +static ssize_t acpi_device_adr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + + return sprintf(buf, "0x%08x\n", + (unsigned int)(acpi_dev->pnp.bus_address)); +} +static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); + +static ssize_t +acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; + int result; + + result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); + if (result) + goto end; + + result = sprintf(buf, "%s\n", (char*)path.pointer); + kfree(path.pointer); +end: + return result; +} +static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); + +/* sysfs file that shows description text from the ACPI _STR method */ +static ssize_t description_show(struct device *dev, + struct device_attribute *attr, + char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + int result; + + if (acpi_dev->pnp.str_obj == NULL) + return 0; + + /* + * The _STR object contains a Unicode identifier for a device. + * We need to convert to utf-8 so it can be displayed. + */ + result = utf16s_to_utf8s( + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, + acpi_dev->pnp.str_obj->buffer.length, + UTF16_LITTLE_ENDIAN, buf, + PAGE_SIZE); + + buf[result++] = '\n'; + + return result; +} +static DEVICE_ATTR(description, 0444, description_show, NULL); + +static ssize_t +acpi_device_sun_show(struct device *dev, struct device_attribute *attr, + char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + acpi_status status; + unsigned long long sun; + + status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return sprintf(buf, "%llu\n", sun); +} +static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + acpi_status status; + unsigned long long sta; + + status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return sprintf(buf, "%llu\n", sta); +} +static DEVICE_ATTR_RO(status); + +/** + * acpi_device_setup_files - Create sysfs attributes of an ACPI device. + * @dev: ACPI device object. 
+ */ +int acpi_device_setup_files(struct acpi_device *dev) +{ + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + int result = 0; + + /* + * Devices gotten from FADT don't have a "path" attribute + */ + if (dev->handle) { + result = device_create_file(&dev->dev, &dev_attr_path); + if (result) + goto end; + } + + if (!list_empty(&dev->pnp.ids)) { + result = device_create_file(&dev->dev, &dev_attr_hid); + if (result) + goto end; + + result = device_create_file(&dev->dev, &dev_attr_modalias); + if (result) + goto end; + } + + /* + * If device has _STR, 'description' file is created + */ + if (acpi_has_method(dev->handle, "_STR")) { + status = acpi_evaluate_object(dev->handle, "_STR", + NULL, &buffer); + if (ACPI_FAILURE(status)) + buffer.pointer = NULL; + dev->pnp.str_obj = buffer.pointer; + result = device_create_file(&dev->dev, &dev_attr_description); + if (result) + goto end; + } + + if (dev->pnp.type.bus_address) + result = device_create_file(&dev->dev, &dev_attr_adr); + if (dev->pnp.unique_id) + result = device_create_file(&dev->dev, &dev_attr_uid); + + if (acpi_has_method(dev->handle, "_SUN")) { + result = device_create_file(&dev->dev, &dev_attr_sun); + if (result) + goto end; + } + + if (acpi_has_method(dev->handle, "_STA")) { + result = device_create_file(&dev->dev, &dev_attr_status); + if (result) + goto end; + } + + /* + * If device has _EJ0, 'eject' file is created that is used to trigger + * hot-removal function from userland. + */ + if (acpi_has_method(dev->handle, "_EJ0")) { + result = device_create_file(&dev->dev, &dev_attr_eject); + if (result) + return result; + } + + if (dev->flags.power_manageable) { + result = device_create_file(&dev->dev, &dev_attr_power_state); + if (result) + return result; + + if (dev->power.flags.power_resources) + result = device_create_file(&dev->dev, + &dev_attr_real_power_state); + } + +end: + return result; +} + +/** + * acpi_device_remove_files - Remove sysfs attributes of an ACPI device. + * @dev: ACPI device object. + */ +void acpi_device_remove_files(struct acpi_device *dev) +{ + if (dev->flags.power_manageable) { + device_remove_file(&dev->dev, &dev_attr_power_state); + if (dev->power.flags.power_resources) + device_remove_file(&dev->dev, + &dev_attr_real_power_state); + } + + /* + * If device has _STR, remove 'description' file + */ + if (acpi_has_method(dev->handle, "_STR")) { + kfree(dev->pnp.str_obj); + device_remove_file(&dev->dev, &dev_attr_description); + } + /* + * If device has _EJ0, remove 'eject' file. + */ + if (acpi_has_method(dev->handle, "_EJ0")) + device_remove_file(&dev->dev, &dev_attr_eject); + + if (acpi_has_method(dev->handle, "_SUN")) + device_remove_file(&dev->dev, &dev_attr_sun); + + if (dev->pnp.unique_id) + device_remove_file(&dev->dev, &dev_attr_uid); + if (dev->pnp.type.bus_address) + device_remove_file(&dev->dev, &dev_attr_adr); + device_remove_file(&dev->dev, &dev_attr_modalias); + device_remove_file(&dev->dev, &dev_attr_hid); + if (acpi_has_method(dev->handle, "_STA")) + device_remove_file(&dev->dev, &dev_attr_status); + if (dev->handle) + device_remove_file(&dev->dev, &dev_attr_path); +} diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index a688aa243..e8e128ded 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9d4761d2f..42c66b64c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -22,10 +22,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -165,8 +161,16 @@ struct transaction { u8 flags; }; +struct acpi_ec_query { + struct transaction transaction; + struct work_struct work; + struct acpi_ec_query_handler *handler; +}; + static int acpi_ec_query(struct acpi_ec *ec, u8 *data); static void advance_transaction(struct acpi_ec *ec); +static void acpi_ec_event_handler(struct work_struct *work); +static void acpi_ec_event_processor(struct work_struct *work); struct acpi_ec *boot_ec, *first_ec; EXPORT_SYMBOL(first_ec); @@ -978,60 +982,92 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static void acpi_ec_run(void *cxt) +static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) { - struct acpi_ec_query_handler *handler = cxt; + struct acpi_ec_query *q; + struct transaction *t; + + q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); + if (!q) + return NULL; + INIT_WORK(&q->work, acpi_ec_event_processor); + t = &q->transaction; + t->command = ACPI_EC_COMMAND_QUERY; + t->rdata = pval; + t->rlen = 1; + return q; +} + +static void acpi_ec_delete_query(struct acpi_ec_query *q) +{ + if (q) { + if (q->handler) + acpi_ec_put_query_handler(q->handler); + kfree(q); + } +} + +static void acpi_ec_event_processor(struct work_struct *work) +{ + struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); + struct acpi_ec_query_handler *handler = q->handler; - if (!handler) - return; ec_dbg_evt("Query(0x%02x) started", handler->query_bit); if (handler->func) handler->func(handler->data); else if (handler->handle) acpi_evaluate_object(handler->handle, NULL, NULL, NULL); ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); - acpi_ec_put_query_handler(handler); + acpi_ec_delete_query(q); } static int acpi_ec_query(struct acpi_ec *ec, u8 *data) { u8 value = 0; int result; - acpi_status status; struct acpi_ec_query_handler *handler; - struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, - .wdata = NULL, .rdata = &value, - .wlen = 0, .rlen = 1}; + struct acpi_ec_query *q; + + q = acpi_ec_create_query(&value); + if (!q) + return -ENOMEM; /* * Query the EC to find out which _Qxx method we need to evaluate. * Note that successful completion of the query causes the ACPI_EC_SCI * bit to be cleared (and thus clearing the interrupt source). 
*/ - result = acpi_ec_transaction(ec, &t); - if (result) - return result; - if (data) - *data = value; + result = acpi_ec_transaction(ec, &q->transaction); if (!value) - return -ENODATA; + result = -ENODATA; + if (result) + goto err_exit; mutex_lock(&ec->mutex); + result = -ENODATA; list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - /* have custom handler for this bit */ - handler = acpi_ec_get_query_handler(handler); + result = 0; + q->handler = acpi_ec_get_query_handler(handler); ec_dbg_evt("Query(0x%02x) scheduled", - handler->query_bit); - status = acpi_os_execute((handler->func) ? - OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, - acpi_ec_run, handler); - if (ACPI_FAILURE(status)) + q->handler->query_bit); + /* + * It is reported that _Qxx are evaluated in a + * parallel way on Windows: + * https://bugzilla.kernel.org/show_bug.cgi?id=94411 + */ + if (!schedule_work(&q->work)) result = -EBUSY; break; } } mutex_unlock(&ec->mutex); + +err_exit: + if (result && q) + acpi_ec_delete_query(q); + if (data) + *data = value; return result; } diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index bea0bbaaf..e297a480e 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index a322710b5..5c67a6d8f 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -15,10 +15,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index 9dcf83682..33505c651 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c @@ -33,13 +33,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { -#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) - acpi_create_platform_device(adev); -#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) + if (IS_ENABLED(CONFIG_INT340X_THERMAL)) + acpi_create_platform_device(adev); /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ - if (id->driver_data == INT3401_DEVICE) + else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && + id->driver_data == INT3401_DEVICE) acpi_create_platform_device(adev); -#endif return 1; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 4683a9693..9e426210c 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -13,9 +13,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _ACPI_INTERNAL_H_ @@ -70,7 +67,7 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val); #ifdef CONFIG_DEBUG_FS extern struct dentry *acpi_debugfs_dir; -int acpi_debugfs_init(void); +void acpi_debugfs_init(void); #else static inline void acpi_debugfs_init(void) { return; } #endif @@ -93,10 +90,21 @@ int acpi_device_add(struct acpi_device *device, void (*release)(struct device *)); void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, unsigned long long sta); +int acpi_device_setup_files(struct acpi_device *dev); +void acpi_device_remove_files(struct acpi_device *dev); void acpi_device_add_finalize(struct acpi_device *device); void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); bool acpi_device_is_present(struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); +bool acpi_device_is_first_physical_node(struct acpi_device *adev, + const struct device *dev); + +/* -------------------------------------------------------------------------- + Device Matching and Notification + -------------------------------------------------------------------------- */ +struct acpi_device *acpi_companion_match(const struct device *dev); +int __acpi_device_uevent_modalias(struct acpi_device *adev, + struct kobj_uevent_env *env); /* -------------------------------------------------------------------------- Power Resource diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index cf0fd96a7..c1b8d03e2 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "nfit.h" /* @@ -764,9 +765,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM); - unsigned long long sta; - int i, rc = -ENODEV; - acpi_status status; + int i; nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en; adev = to_acpi_dev(acpi_desc); @@ -781,25 +780,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, return force_enable_dimms ? 0 : -ENODEV; } - status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta); - if (status == AE_NOT_FOUND) { - dev_dbg(dev, "%s missing _STA, assuming enabled...\n", - dev_name(&adev_dimm->dev)); - rc = 0; - } else if (ACPI_FAILURE(status)) - dev_err(dev, "%s failed to retrieve_STA, disabling...\n", - dev_name(&adev_dimm->dev)); - else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0) - dev_info(dev, "%s disabled by firmware\n", - dev_name(&adev_dimm->dev)); - else - rc = 0; - for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++) if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) set_bit(i, &nfit_mem->dsm_mask); - return force_enable_dimms ? 
0 : rc; + return 0; } static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) @@ -868,6 +853,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) struct acpi_device *adev; int i; + nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en; adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -1032,7 +1018,7 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) if (mmio->num_lines) offset = to_interleave_offset(offset, mmio); - return readl(mmio->base + offset); + return readl(mmio->addr.base + offset); } static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, @@ -1057,11 +1043,11 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, if (mmio->num_lines) offset = to_interleave_offset(offset, mmio); - writeq(cmd, mmio->base + offset); + writeq(cmd, mmio->addr.base + offset); wmb_blk(nfit_blk); if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH) - readq(mmio->base + offset); + readq(mmio->addr.base + offset); } static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, @@ -1093,11 +1079,16 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, } if (rw) - memcpy_to_pmem(mmio->aperture + offset, + memcpy_to_pmem(mmio->addr.aperture + offset, iobuf + copied, c); - else + else { + if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH) + mmio_flush_range((void __force *) + mmio->addr.aperture + offset, c); + memcpy_from_pmem(iobuf + copied, - mmio->aperture + offset, c); + mmio->addr.aperture + offset, c); + } copied += c; len -= c; @@ -1144,7 +1135,10 @@ static void nfit_spa_mapping_release(struct kref *kref) WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); - iounmap(spa_map->iomem); + if (spa_map->type == SPA_MAP_APERTURE) + memunmap((void __force *)spa_map->addr.aperture); + else + iounmap(spa_map->addr.base); release_mem_region(spa->address, spa->length); list_del(&spa_map->list); kfree(spa_map); @@ -1190,7 +1184,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, spa_map = find_spa_mapping(acpi_desc, spa); if (spa_map) { kref_get(&spa_map->kref); - return spa_map->iomem; + return spa_map->addr.base; } spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); @@ -1206,20 +1200,19 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, if (!res) goto err_mem; - if (type == SPA_MAP_APERTURE) { - /* - * TODO: memremap_pmem() support, but that requires cache - * flushing when the aperture is moved. 
- */ - spa_map->iomem = ioremap_wc(start, n); - } else - spa_map->iomem = ioremap_nocache(start, n); + spa_map->type = type; + if (type == SPA_MAP_APERTURE) + spa_map->addr.aperture = (void __pmem *)memremap(start, n, + ARCH_MEMREMAP_PMEM); + else + spa_map->addr.base = ioremap_nocache(start, n); + - if (!spa_map->iomem) + if (!spa_map->addr.base) goto err_map; list_add_tail(&spa_map->list, &acpi_desc->spa_maps); - return spa_map->iomem; + return spa_map->addr.base; err_map: release_mem_region(start, n); @@ -1282,7 +1275,7 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, nfit_blk->dimm_flags = flags.flags; else if (rc == -ENOTTY) { /* fall back to a conservative default */ - nfit_blk->dimm_flags = ND_BLK_DCR_LATCH; + nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH; rc = 0; } else rc = -ENXIO; @@ -1322,9 +1315,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, /* map block aperture memory */ nfit_blk->bdw_offset = nfit_mem->bdw->offset; mmio = &nfit_blk->mmio[BDW]; - mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, + mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, SPA_MAP_APERTURE); - if (!mmio->base) { + if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, nvdimm_name(nvdimm)); return -ENOMEM; @@ -1345,9 +1338,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; nfit_blk->stat_offset = nfit_mem->dcr->status_offset; mmio = &nfit_blk->mmio[DCR]; - mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, + mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, SPA_MAP_CONTROL); - if (!mmio->base) { + if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, nvdimm_name(nvdimm)); return -ENOMEM; @@ -1379,7 +1372,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, return -ENOMEM; } - if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush) + if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (mmio->line_size == 0) @@ -1414,7 +1407,7 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, for (i = 0; i < 2; i++) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; - if (mmio->base) + if (mmio->addr.base) nfit_spa_unmap(acpi_desc, mmio->spa); } nd_blk_region_set_provider_data(ndbr, NULL); diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 79b6d8387..7e740156b 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h @@ -41,6 +41,7 @@ enum nfit_uuids { }; enum { + ND_BLK_READ_FLUSH = 1, ND_BLK_DCR_LATCH = 2, }; @@ -107,6 +108,7 @@ struct acpi_nfit_desc { struct nvdimm_bus *nvdimm_bus; struct device *dev; unsigned long dimm_dsm_force_en; + unsigned long bus_dsm_force_en; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); }; @@ -116,12 +118,16 @@ enum nd_blk_mmio_selector { DCR, }; +struct nd_blk_addr { + union { + void __iomem *base; + void __pmem *aperture; + }; +}; + struct nfit_blk { struct nfit_blk_mmio { - union { - void __iomem *base; - void __pmem *aperture; - }; + struct nd_blk_addr addr; u64 size; u64 base_offset; u32 line_size; @@ -148,7 +154,8 @@ struct nfit_spa_mapping { struct acpi_nfit_system_address *spa; struct list_head list; struct kref kref; - void __iomem *iomem; + enum spa_map_type type; + struct nd_blk_addr addr; }; static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref) diff --git a/drivers/acpi/numa.c 
b/drivers/acpi/numa.c index acaa3b4ea..72b6e9ef0 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 3b8963f21..739a4a6b3 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ @@ -47,6 +43,7 @@ #include #include +#include #include "internal.h" @@ -83,6 +80,7 @@ static void *acpi_irq_context; static struct workqueue_struct *kacpid_wq; static struct workqueue_struct *kacpi_notify_wq; static struct workqueue_struct *kacpi_hotplug_wq; +static bool acpi_os_initialized; /* * This list of permanent mappings is for memory that may be accessed from @@ -947,21 +945,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) EXPORT_SYMBOL(acpi_os_write_port); -#ifdef readq -static inline u64 read64(const volatile void __iomem *addr) -{ - return readq(addr); -} -#else -static inline u64 read64(const volatile void __iomem *addr) -{ - u64 l, h; - l = readl(addr); - h = readl(addr+4); - return l | (h << 32); -} -#endif - acpi_status acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) { @@ -994,7 +977,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) *(u32 *) value = readl(virt_addr); break; case 64: - *(u64 *) value = read64(virt_addr); + *(u64 *) value = readq(virt_addr); break; default: BUG(); @@ -1008,19 +991,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) return AE_OK; } -#ifdef writeq -static inline void write64(u64 val, volatile void __iomem *addr) -{ - writeq(val, addr); -} -#else -static inline void write64(u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val>>32, addr+4); -} -#endif - acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) { @@ -1049,7 +1019,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) writel(value, virt_addr); break; case 64: - write64(value, virt_addr); + writeq(value, virt_addr); break; default: BUG(); @@ -1316,6 +1286,9 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) long jiffies; int ret = 0; + if (!acpi_os_initialized) + return AE_OK; + if (!sem || (units < 1)) return AE_BAD_PARAMETER; @@ -1355,6 +1328,9 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { struct semaphore *sem = (struct semaphore *)handle; + if (!acpi_os_initialized) + return AE_OK; + if (!sem || (units < 1)) return AE_BAD_PARAMETER; @@ -1863,6 +1839,7 @@ acpi_status __init acpi_os_initialize(void) rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); } + acpi_os_initialized = 
true; return AE_OK; } diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 304eccb0a..c9336751e 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -376,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && + acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); @@ -412,7 +409,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) return 0; } - if (dev->irq_managed && dev->irq > 0) + if (pci_has_managed_irq(dev)) return 0; entry = acpi_pci_irq_lookup(dev, pin); @@ -457,8 +454,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) kfree(entry); return rc; } - dev->irq = rc; - dev->irq_managed = 1; + pci_set_managed_irq(dev, rc); if (link) snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); @@ -481,16 +477,8 @@ void acpi_pci_irq_disable(struct pci_dev *dev) u8 pin; pin = dev->pin; - if (!pin || !dev->irq_managed || dev->irq <= 0) - return; - - /* Keep IOAPIC pin configuration when suspending */ - if (dev->dev.power.is_prepared) + if (!pin || !pci_has_managed_irq(dev)) return; -#ifdef CONFIG_PM - if (dev->dev.power.runtime_status == RPM_SUSPENDING) - return; -#endif entry = acpi_pci_irq_lookup(dev, pin); if (!entry) @@ -511,6 +499,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); if (gsi >= 0) { acpi_unregister_gsi(gsi); - dev->irq_managed = 0; + pci_reset_managed_irq(dev); } } diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index b09ad5544..7c8408b94 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * TBD: @@ -502,8 +498,7 @@ int __init acpi_irq_penalty_init(void) PIRQ_PENALTY_PCI_POSSIBLE; } } - /* Add a penalty for the SCI */ - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; + return 0; } @@ -557,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) irq = link->irq.possible[i]; } } + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " + "Try pci=noacpi or acpi=off\n", + acpi_device_name(link->device), + acpi_device_bid(link->device)); + return -ENODEV; + } /* Attempt to enable the link device at this IRQ. 
*/ if (acpi_pci_link_set(link, irq)) { @@ -825,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) } } +bool acpi_isa_irq_available(int irq) +{ + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); +} + /* * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 1b5569c09..393706a52 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index 139d9e479..7188e53b6 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -20,10 +20,6 @@ * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 93eac53b5..fcd4ce6f7 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -1,8 +1,10 @@ /* - * acpi_power.c - ACPI Bus Power Management ($Revision: 39 $) + * drivers/acpi/power.c - ACPI Power Resources management. * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2001 - 2015 Intel Corp. + * Author: Andy Grover + * Author: Paul Diefenbaugh + * Author: Rafael J. Wysocki * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -16,10 +18,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -27,10 +25,11 @@ * ACPI power-managed devices may be controlled in two ways: * 1. via "Device Specific (D-State) Control" * 2. via "Power Resource Control". - * This module is used to manage devices relying on Power Resource Control. + * The code below deals with ACPI Power Resources control. * - * An ACPI "power resource object" describes a software controllable power - * plane, clock plane, or other resource used by a power managed device. + * An ACPI "power resource object" represents a software controllable power + * plane, clock plane, or other resource depended on by a device. + * * A device may rely on multiple power resources, and a power resource * may be shared by multiple devices. 
*/ diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index d9f71581b..51e658f21 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -21,10 +21,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -159,38 +155,28 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block __refdata acpi_cpu_notifier = { +static struct notifier_block acpi_cpu_notifier = { .notifier_call = acpi_cpu_soft_notify, }; -static int __acpi_processor_start(struct acpi_device *device) +#ifdef CONFIG_ACPI_CPU_FREQ_PSS +static int acpi_pss_perf_init(struct acpi_processor *pr, + struct acpi_device *device) { - struct acpi_processor *pr = acpi_driver_data(device); - acpi_status status; int result = 0; - if (!pr) - return -ENODEV; - - if (pr->flags.need_hotplug_init) - return 0; - -#ifdef CONFIG_CPU_FREQ acpi_processor_ppc_has_changed(pr, 0); -#endif + acpi_processor_get_throttling_info(pr); if (pr->flags.throttling) pr->flags.limit = 1; - if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) - acpi_processor_power_init(pr); - pr->cdev = thermal_cooling_device_register("Processor", device, &processor_cooling_ops); if (IS_ERR(pr->cdev)) { result = PTR_ERR(pr->cdev); - goto err_power_exit; + return result; } dev_dbg(&device->dev, "registered as cooling_device%d\n", @@ -204,6 +190,7 @@ static int __acpi_processor_start(struct acpi_device *device) "Failed to create sysfs link 'thermal_cooling'\n"); goto err_thermal_unregister; } + result = sysfs_create_link(&pr->cdev->device.kobj, &device->dev.kobj, "device"); @@ -213,17 +200,61 @@ static int __acpi_processor_start(struct acpi_device *device) goto err_remove_sysfs_thermal; } - status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_processor_notify, device); - if (ACPI_SUCCESS(status)) - return 0; - sysfs_remove_link(&pr->cdev->device.kobj, "device"); err_remove_sysfs_thermal: sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); err_thermal_unregister: thermal_cooling_device_unregister(pr->cdev); - err_power_exit: + + return result; +} + +static void acpi_pss_perf_exit(struct acpi_processor *pr, + struct acpi_device *device) +{ + if (pr->cdev) { + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&pr->cdev->device.kobj, "device"); + thermal_cooling_device_unregister(pr->cdev); + pr->cdev = NULL; + } +} +#else +static inline int acpi_pss_perf_init(struct acpi_processor *pr, + struct acpi_device *device) +{ + return 0; +} + +static inline void acpi_pss_perf_exit(struct acpi_processor *pr, + struct acpi_device *device) {} +#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ + +static int __acpi_processor_start(struct acpi_device *device) +{ + struct acpi_processor *pr = acpi_driver_data(device); + acpi_status status; + int result = 0; + + if (!pr) + return -ENODEV; + + if (pr->flags.need_hotplug_init) + return 0; + + if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr); + + result = acpi_pss_perf_init(pr, device); + if (result) + goto err_power_exit; + + status = 
acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_processor_notify, device); + if (ACPI_SUCCESS(status)) + return 0; + +err_power_exit: acpi_processor_power_exit(pr); return result; } @@ -252,15 +283,10 @@ static int acpi_processor_stop(struct device *dev) pr = acpi_driver_data(device); if (!pr) return 0; - acpi_processor_power_exit(pr); - if (pr->cdev) { - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - sysfs_remove_link(&pr->cdev->device.kobj, "device"); - thermal_cooling_device_unregister(pr->cdev); - pr->cdev = NULL; - } + acpi_pss_perf_exit(pr, device); + return 0; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index d540f42c9..175c86bee 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -21,10 +21,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index cfc8aba72..bb01dea39 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -20,10 +20,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * */ #include @@ -87,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb, if (ignore_ppc) return 0; - if (event != CPUFREQ_INCOMPATIBLE) + if (event != CPUFREQ_ADJUST) return 0; mutex_lock(&performance_mutex); @@ -784,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance EXPORT_SYMBOL(acpi_processor_register_performance); -void -acpi_processor_unregister_performance(struct acpi_processor_performance - *performance, unsigned int cpu) +void acpi_processor_unregister_performance(unsigned int cpu) { struct acpi_processor *pr; diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index e003663b2..1fed84a09 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 84243c32e..f170d7463 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
- * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 7836e2e98..6d9945054 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -528,13 +528,14 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, if (!val) return obj->package.count; - else if (nval <= 0) - return -EINVAL; if (nval > obj->package.count) return -EOVERFLOW; + else if (nval <= 0) + return -EINVAL; items = obj->package.elements; + switch (proptype) { case DEV_PROP_U8: ret = acpi_copy_property_array_u8(items, (u8 *)val, nval); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index f1c966e05..15d22db05 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 01504c819..cb3dedb1b 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ec256352f..01136b879 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -115,264 +115,6 @@ int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler, return 0; } -/** - * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent - * @acpi_dev: ACPI device object. - * @modalias: Buffer to print into. - * @size: Size of the buffer. - * - * Creates hid/cid(s) string needed for modalias and uevent - * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: - * char *modalias: "acpi:IBM0001:ACPI0001" - * Return: 0: no _HID and no _CID - * -EINVAL: output error - * -ENOMEM: output is truncated -*/ -static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, - int size) -{ - int len; - int count; - struct acpi_hardware_id *id; - - /* - * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should - * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the - * device's list. - */ - count = 0; - list_for_each_entry(id, &acpi_dev->pnp.ids, list) - if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) - count++; - - if (!count) - return 0; - - len = snprintf(modalias, size, "acpi:"); - if (len <= 0) - return len; - - size -= len; - - list_for_each_entry(id, &acpi_dev->pnp.ids, list) { - if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) - continue; - - count = snprintf(&modalias[len], size, "%s:", id->id); - if (count < 0) - return -EINVAL; - - if (count >= size) - return -ENOMEM; - - len += count; - size -= count; - } - modalias[len] = '\0'; - return len; -} - -/** - * create_of_modalias - Creates DT compatible string for modalias and uevent - * @acpi_dev: ACPI device object. - * @modalias: Buffer to print into. 
- * @size: Size of the buffer. - * - * Expose DT compatible modalias as of:NnameTCcompatible. This function should - * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of - * ACPI/PNP IDs. - */ -static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, - int size) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - const union acpi_object *of_compatible, *obj; - int len, count; - int i, nval; - char *c; - - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); - /* DT strings are all in lower case */ - for (c = buf.pointer; *c != '\0'; c++) - *c = tolower(*c); - - len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); - ACPI_FREE(buf.pointer); - - if (len <= 0) - return len; - - of_compatible = acpi_dev->data.of_compatible; - if (of_compatible->type == ACPI_TYPE_PACKAGE) { - nval = of_compatible->package.count; - obj = of_compatible->package.elements; - } else { /* Must be ACPI_TYPE_STRING. */ - nval = 1; - obj = of_compatible; - } - for (i = 0; i < nval; i++, obj++) { - count = snprintf(&modalias[len], size, "C%s", - obj->string.pointer); - if (count < 0) - return -EINVAL; - - if (count >= size) - return -ENOMEM; - - len += count; - size -= count; - } - modalias[len] = '\0'; - return len; -} - -/* - * acpi_companion_match() - Can we match via ACPI companion device - * @dev: Device in question - * - * Check if the given device has an ACPI companion and if that companion has - * a valid list of PNP IDs, and if the device is the first (primary) physical - * device associated with it. Return the companion pointer if that's the case - * or NULL otherwise. - * - * If multiple physical devices are attached to a single ACPI companion, we need - * to be careful. The usage scenario for this kind of relationship is that all - * of the physical devices in question use resources provided by the ACPI - * companion. A typical case is an MFD device where all the sub-devices share - * the parent's ACPI companion. In such cases we can only allow the primary - * (first) physical device to be matched with the help of the companion's PNP - * IDs. - * - * Additional physical devices sharing the ACPI companion can still use - * resources available from it but they will be matched normally using functions - * provided by their bus types (and analogously for their modalias). 
- */ -static struct acpi_device *acpi_companion_match(const struct device *dev) -{ - struct acpi_device *adev; - struct mutex *physical_node_lock; - - adev = ACPI_COMPANION(dev); - if (!adev) - return NULL; - - if (list_empty(&adev->pnp.ids)) - return NULL; - - physical_node_lock = &adev->physical_node_lock; - mutex_lock(physical_node_lock); - if (list_empty(&adev->physical_node_list)) { - adev = NULL; - } else { - const struct acpi_device_physical_node *node; - - node = list_first_entry(&adev->physical_node_list, - struct acpi_device_physical_node, node); - if (node->dev != dev) - adev = NULL; - } - mutex_unlock(physical_node_lock); - - return adev; -} - -static int __acpi_device_uevent_modalias(struct acpi_device *adev, - struct kobj_uevent_env *env) -{ - int len; - - if (!adev) - return -ENODEV; - - if (list_empty(&adev->pnp.ids)) - return 0; - - if (add_uevent_var(env, "MODALIAS=")) - return -ENOMEM; - - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - if (len < 0) - return len; - - env->buflen += len; - if (!adev->data.of_compatible) - return 0; - - if (len > 0 && add_uevent_var(env, "MODALIAS=")) - return -ENOMEM; - - len = create_of_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - if (len < 0) - return len; - - env->buflen += len; - - return 0; -} - -/* - * Creates uevent modalias field for ACPI enumerated devices. - * Because the other buses does not support ACPI HIDs & CIDs. - * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get: - * "acpi:IBM0001:ACPI0001" - */ -int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) -{ - return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); -} -EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); - -static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size) -{ - int len, count; - - if (!adev) - return -ENODEV; - - if (list_empty(&adev->pnp.ids)) - return 0; - - len = create_pnp_modalias(adev, buf, size - 1); - if (len < 0) { - return len; - } else if (len > 0) { - buf[len++] = '\n'; - size -= len; - } - if (!adev->data.of_compatible) - return len; - - count = create_of_modalias(adev, buf + len, size - 1); - if (count < 0) { - return count; - } else if (count > 0) { - len += count; - buf[len++] = '\n'; - } - - return len; -} - -/* - * Creates modalias sysfs attribute for ACPI enumerated devices. - * Because the other buses does not support ACPI HIDs & CIDs. - * e.g. 
for a device with hid:IBM0001 and cid:ACPI0001 you get: - * "acpi:IBM0001:ACPI0001" - */ -int acpi_device_modalias(struct device *dev, char *buf, int size) -{ - return __acpi_device_modalias(acpi_companion_match(dev), buf, size); -} -EXPORT_SYMBOL_GPL(acpi_device_modalias); - -static ssize_t -acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { - return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); -} -static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); - bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) { struct acpi_device_physical_node *pn; @@ -701,423 +443,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) unlock_device_hotplug(); } -static ssize_t real_power_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_device *adev = to_acpi_device(dev); - int state; - int ret; - - ret = acpi_device_get_power(adev, &state); - if (ret) - return ret; - - return sprintf(buf, "%s\n", acpi_power_state_string(state)); -} - -static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL); - -static ssize_t power_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_device *adev = to_acpi_device(dev); - - return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); -} - -static DEVICE_ATTR(power_state, 0444, power_state_show, NULL); - -static ssize_t -acpi_eject_store(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct acpi_device *acpi_device = to_acpi_device(d); - acpi_object_type not_used; - acpi_status status; - - if (!count || buf[0] != '1') - return -EINVAL; - - if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) - && !acpi_device->driver) - return -ENODEV; - - status = acpi_get_type(acpi_device->handle, &not_used); - if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) - return -ENODEV; - - get_device(&acpi_device->dev); - status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); - if (ACPI_SUCCESS(status)) - return count; - - put_device(&acpi_device->dev); - acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, - ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); - return status == AE_NO_MEMORY ?
-ENOMEM : -EAGAIN; -} - -static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); - -static ssize_t -acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); - - return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); -} -static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); - -static ssize_t acpi_device_uid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_device *acpi_dev = to_acpi_device(dev); - - return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); -} -static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); - -static ssize_t acpi_device_adr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct acpi_device *acpi_dev = to_acpi_device(dev); - - return sprintf(buf, "0x%08x\n", - (unsigned int)(acpi_dev->pnp.bus_address)); -} -static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); - -static ssize_t -acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; - int result; - - result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); - if (result) - goto end; - - result = sprintf(buf, "%s\n", (char*)path.pointer); - kfree(path.pointer); -end: - return result; -} -static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); - -/* sysfs file that shows description text from the ACPI _STR method */ -static ssize_t description_show(struct device *dev, - struct device_attribute *attr, - char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); - int result; - - if (acpi_dev->pnp.str_obj == NULL) - return 0; - - /* - * The _STR object contains a Unicode identifier for a device. - * We need to convert to utf-8 so it can be displayed. 
- */ - result = utf16s_to_utf8s( - (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, - acpi_dev->pnp.str_obj->buffer.length, - UTF16_LITTLE_ENDIAN, buf, - PAGE_SIZE); - - buf[result++] = '\n'; - - return result; -} -static DEVICE_ATTR(description, 0444, description_show, NULL); - -static ssize_t -acpi_device_sun_show(struct device *dev, struct device_attribute *attr, - char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); - acpi_status status; - unsigned long long sun; - - status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); - if (ACPI_FAILURE(status)) - return -ENODEV; - - return sprintf(buf, "%llu\n", sun); -} -static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); - -static ssize_t status_show(struct device *dev, struct device_attribute *attr, - char *buf) { - struct acpi_device *acpi_dev = to_acpi_device(dev); - acpi_status status; - unsigned long long sta; - - status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); - if (ACPI_FAILURE(status)) - return -ENODEV; - - return sprintf(buf, "%llu\n", sta); -} -static DEVICE_ATTR_RO(status); - -static int acpi_device_setup_files(struct acpi_device *dev) -{ - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - int result = 0; - - /* - * Devices gotten from FADT don't have a "path" attribute - */ - if (dev->handle) { - result = device_create_file(&dev->dev, &dev_attr_path); - if (result) - goto end; - } - - if (!list_empty(&dev->pnp.ids)) { - result = device_create_file(&dev->dev, &dev_attr_hid); - if (result) - goto end; - - result = device_create_file(&dev->dev, &dev_attr_modalias); - if (result) - goto end; - } - - /* - * If device has _STR, 'description' file is created - */ - if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); - if (ACPI_FAILURE(status)) - buffer.pointer = NULL; - dev->pnp.str_obj = buffer.pointer; - result = device_create_file(&dev->dev, &dev_attr_description); - if (result) - goto end; - } - - if (dev->pnp.type.bus_address) - result = device_create_file(&dev->dev, &dev_attr_adr); - if (dev->pnp.unique_id) - result = device_create_file(&dev->dev, &dev_attr_uid); - - if (acpi_has_method(dev->handle, "_SUN")) { - result = device_create_file(&dev->dev, &dev_attr_sun); - if (result) - goto end; - } - - if (acpi_has_method(dev->handle, "_STA")) { - result = device_create_file(&dev->dev, &dev_attr_status); - if (result) - goto end; - } - - /* - * If device has _EJ0, 'eject' file is created that is used to trigger - * hot-removal function from userland. - */ - if (acpi_has_method(dev->handle, "_EJ0")) { - result = device_create_file(&dev->dev, &dev_attr_eject); - if (result) - return result; - } - - if (dev->flags.power_manageable) { - result = device_create_file(&dev->dev, &dev_attr_power_state); - if (result) - return result; - - if (dev->power.flags.power_resources) - result = device_create_file(&dev->dev, - &dev_attr_real_power_state); - } - -end: - return result; -} - -static void acpi_device_remove_files(struct acpi_device *dev) -{ - if (dev->flags.power_manageable) { - device_remove_file(&dev->dev, &dev_attr_power_state); - if (dev->power.flags.power_resources) - device_remove_file(&dev->dev, - &dev_attr_real_power_state); - } - - /* - * If device has _STR, remove 'description' file - */ - if (acpi_has_method(dev->handle, "_STR")) { - kfree(dev->pnp.str_obj); - device_remove_file(&dev->dev, &dev_attr_description); - } - /* - * If device has _EJ0, remove 'eject' file. 
- */ - if (acpi_has_method(dev->handle, "_EJ0")) - device_remove_file(&dev->dev, &dev_attr_eject); - - if (acpi_has_method(dev->handle, "_SUN")) - device_remove_file(&dev->dev, &dev_attr_sun); - - if (dev->pnp.unique_id) - device_remove_file(&dev->dev, &dev_attr_uid); - if (dev->pnp.type.bus_address) - device_remove_file(&dev->dev, &dev_attr_adr); - device_remove_file(&dev->dev, &dev_attr_modalias); - device_remove_file(&dev->dev, &dev_attr_hid); - if (acpi_has_method(dev->handle, "_STA")) - device_remove_file(&dev->dev, &dev_attr_status); - if (dev->handle) - device_remove_file(&dev->dev, &dev_attr_path); -} -/* -------------------------------------------------------------------------- - ACPI Bus operations - -------------------------------------------------------------------------- */ - -/** - * acpi_of_match_device - Match device object using the "compatible" property. - * @adev: ACPI device object to match. - * @of_match_table: List of device IDs to match against. - * - * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of - * identifiers and a _DSD object with the "compatible" property, use that - * property to match against the given list of identifiers. - */ -static bool acpi_of_match_device(struct acpi_device *adev, - const struct of_device_id *of_match_table) -{ - const union acpi_object *of_compatible, *obj; - int i, nval; - - if (!adev) - return false; - - of_compatible = adev->data.of_compatible; - if (!of_match_table || !of_compatible) - return false; - - if (of_compatible->type == ACPI_TYPE_PACKAGE) { - nval = of_compatible->package.count; - obj = of_compatible->package.elements; - } else { /* Must be ACPI_TYPE_STRING. */ - nval = 1; - obj = of_compatible; - } - /* Now we can look for the driver DT compatible strings */ - for (i = 0; i < nval; i++, obj++) { - const struct of_device_id *id; - - for (id = of_match_table; id->compatible[0]; id++) - if (!strcasecmp(obj->string.pointer, id->compatible)) - return true; - } - - return false; -} - -static bool __acpi_match_device_cls(const struct acpi_device_id *id, - struct acpi_hardware_id *hwid) -{ - int i, msk, byte_shift; - char buf[3]; - - if (!id->cls) - return false; - - /* Apply class-code bitmask, before checking each class-code byte */ - for (i = 1; i <= 3; i++) { - byte_shift = 8 * (3 - i); - msk = (id->cls_msk >> byte_shift) & 0xFF; - if (!msk) - continue; - - sprintf(buf, "%02x", (id->cls >> byte_shift) & msk); - if (strncmp(buf, &hwid->id[(i - 1) * 2], 2)) - return false; - } - return true; -} - -static const struct acpi_device_id *__acpi_match_device( - struct acpi_device *device, - const struct acpi_device_id *ids, - const struct of_device_id *of_ids) -{ - const struct acpi_device_id *id; - struct acpi_hardware_id *hwid; - - /* - * If the device is not present, it is unnecessary to load device - * driver for it. - */ - if (!device || !device->status.present) - return NULL; - - list_for_each_entry(hwid, &device->pnp.ids, list) { - /* First, check the ACPI/PNP IDs provided by the caller. */ - for (id = ids; id->id[0] || id->cls; id++) { - if (id->id[0] && !strcmp((char *) id->id, hwid->id)) - return id; - else if (id->cls && __acpi_match_device_cls(id, hwid)) - return id; - } - - /* - * Next, check ACPI_DT_NAMESPACE_HID and try to match the - * "compatible" property if found. - * - * The id returned by the below is not valid, but the only - * caller passing non-NULL of_ids here is only interested in - * whether or not the return value is NULL. 
- */ - if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) - && acpi_of_match_device(device, of_ids)) - return id; - } - return NULL; -} - -/** - * acpi_match_device - Match a struct device against a given list of ACPI IDs - * @ids: Array of struct acpi_device_id object to match against. - * @dev: The device structure to match. - * - * Check if @dev has a valid ACPI handle and if there is a struct acpi_device - * object for that handle and use that object to match against a given list of - * device IDs. - * - * Return a pointer to the first matching ID on success or %NULL on failure. - */ -const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, - const struct device *dev) -{ - return __acpi_match_device(acpi_companion_match(dev), ids, NULL); -} -EXPORT_SYMBOL_GPL(acpi_match_device); - -int acpi_match_device_ids(struct acpi_device *device, - const struct acpi_device_id *ids) -{ - return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; -} -EXPORT_SYMBOL(acpi_match_device_ids); - -bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) -{ - if (!drv->acpi_match_table) - return acpi_of_match_device(ACPI_COMPANION(dev), - drv->of_match_table); - - return !!__acpi_match_device(acpi_companion_match(dev), - drv->acpi_match_table, drv->of_match_table); -} -EXPORT_SYMBOL_GPL(acpi_driver_match_device); - static void acpi_free_power_resources_lists(struct acpi_device *device) { int i; @@ -1144,144 +469,6 @@ static void acpi_device_release(struct device *dev) kfree(acpi_dev); } -static int acpi_bus_match(struct device *dev, struct device_driver *drv) -{ - struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = to_acpi_driver(drv); - - return acpi_dev->flags.match_driver - && !acpi_match_device_ids(acpi_dev, acpi_drv->ids); -} - -static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - return __acpi_device_uevent_modalias(to_acpi_device(dev), env); -} - -static void acpi_device_notify(acpi_handle handle, u32 event, void *data) -{ - struct acpi_device *device = data; - - device->driver->ops.notify(device, event); -} - -static void acpi_device_notify_fixed(void *data) -{ - struct acpi_device *device = data; - - /* Fixed hardware devices have no handles */ - acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); -} - -static u32 acpi_device_fixed_event(void *data) -{ - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); - return ACPI_INTERRUPT_HANDLED; -} - -static int acpi_device_install_notify_handler(struct acpi_device *device) -{ - acpi_status status; - - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) - status = - acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event, - device); - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) - status = - acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event, - device); - else - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_device_notify, - device); - - if (ACPI_FAILURE(status)) - return -EINVAL; - return 0; -} - -static void acpi_device_remove_notify_handler(struct acpi_device *device) -{ - if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) - acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_fixed_event); - else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) - acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_fixed_event); - else - 
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_device_notify); -} - -static int acpi_device_probe(struct device *dev) -{ - struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); - int ret; - - if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) - return -EINVAL; - - if (!acpi_drv->ops.add) - return -ENOSYS; - - ret = acpi_drv->ops.add(acpi_dev); - if (ret) - return ret; - - acpi_dev->driver = acpi_drv; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Driver [%s] successfully bound to device [%s]\n", - acpi_drv->name, acpi_dev->pnp.bus_id)); - - if (acpi_drv->ops.notify) { - ret = acpi_device_install_notify_handler(acpi_dev); - if (ret) { - if (acpi_drv->ops.remove) - acpi_drv->ops.remove(acpi_dev); - - acpi_dev->driver = NULL; - acpi_dev->driver_data = NULL; - return ret; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n", - acpi_drv->name, acpi_dev->pnp.bus_id)); - get_device(dev); - return 0; -} - -static int acpi_device_remove(struct device * dev) -{ - struct acpi_device *acpi_dev = to_acpi_device(dev); - struct acpi_driver *acpi_drv = acpi_dev->driver; - - if (acpi_drv) { - if (acpi_drv->ops.notify) - acpi_device_remove_notify_handler(acpi_dev); - if (acpi_drv->ops.remove) - acpi_drv->ops.remove(acpi_dev); - } - acpi_dev->driver = NULL; - acpi_dev->driver_data = NULL; - - put_device(dev); - return 0; -} - -struct bus_type acpi_bus_type = { - .name = "acpi", - .match = acpi_bus_match, - .probe = acpi_device_probe, - .remove = acpi_device_remove, - .uevent = acpi_device_uevent, -}; - static void acpi_device_del(struct acpi_device *device) { mutex_lock(&acpi_device_lock); @@ -1528,47 +715,6 @@ struct acpi_device *acpi_get_next_child(struct device *dev, return next == head ? NULL : list_entry(next, struct acpi_device, node); } -/* -------------------------------------------------------------------------- - Driver Management - -------------------------------------------------------------------------- */ -/** - * acpi_bus_register_driver - register a driver with the ACPI bus - * @driver: driver being registered - * - * Registers a driver with the ACPI bus. Searches the namespace for all - * devices that match the driver's criteria and binds. Returns zero for - * success or a negative error status for failure. - */ -int acpi_bus_register_driver(struct acpi_driver *driver) -{ - int ret; - - if (acpi_disabled) - return -ENODEV; - driver->drv.name = driver->name; - driver->drv.bus = &acpi_bus_type; - driver->drv.owner = driver->owner; - - ret = driver_register(&driver->drv); - return ret; -} - -EXPORT_SYMBOL(acpi_bus_register_driver); - -/** - * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus - * @driver: driver to unregister - * - * Unregisters a driver with the ACPI bus. Searches the namespace for all - * devices that match the driver's criteria and unbinds. 
- */ -void acpi_bus_unregister_driver(struct acpi_driver *driver) -{ - driver_unregister(&driver->drv); -} - -EXPORT_SYMBOL(acpi_bus_unregister_driver); - /* -------------------------------------------------------------------------- Device Enumeration -------------------------------------------------------------------------- */ @@ -2744,12 +1890,6 @@ int __init acpi_scan_init(void) { int result; - result = bus_register(&acpi_bus_type); - if (result) { - /* We don't want to quit even if we failed to add suspend/resume */ - printk(KERN_ERR PREFIX "Could not register bus type\n"); - } - acpi_pci_root_init(); acpi_pci_link_init(); acpi_processor_init(); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 0876d77b3..40a426552 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -69,6 +69,8 @@ static const struct acpi_dlevel acpi_debug_levels[] = { ACPI_DEBUG_INIT(ACPI_LV_INIT), ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT), ACPI_DEBUG_INIT(ACPI_LV_INFO), + ACPI_DEBUG_INIT(ACPI_LV_REPAIR), + ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT), ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES), ACPI_DEBUG_INIT(ACPI_LV_PARSE), @@ -162,55 +164,116 @@ static const struct kernel_param_ops param_ops_debug_level = { module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644); module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644); -static char trace_method_name[6]; -module_param_string(trace_method_name, trace_method_name, 6, 0644); -static unsigned int trace_debug_layer; -module_param(trace_debug_layer, uint, 0644); -static unsigned int trace_debug_level; -module_param(trace_debug_level, uint, 0644); +static char trace_method_name[1024]; -static int param_set_trace_state(const char *val, struct kernel_param *kp) +int param_set_trace_method_name(const char *val, const struct kernel_param *kp) { - int result = 0; + u32 saved_flags = 0; + bool is_abs_path = true; - if (!strncmp(val, "enable", sizeof("enable") - 1)) { - result = acpi_debug_trace(trace_method_name, trace_debug_level, - trace_debug_layer, 0); - if (result) - result = -EBUSY; - goto exit; - } + if (*val != '\\') + is_abs_path = false; - if (!strncmp(val, "disable", sizeof("disable") - 1)) { - int name = 0; - result = acpi_debug_trace((char *)&name, trace_debug_level, - trace_debug_layer, 0); - if (result) - result = -EBUSY; - goto exit; + if ((is_abs_path && strlen(val) > 1023) || + (!is_abs_path && strlen(val) > 1022)) { + pr_err("%s: string parameter too long\n", kp->name); + return -ENOSPC; } - if (!strncmp(val, "1", 1)) { - result = acpi_debug_trace(trace_method_name, trace_debug_level, - trace_debug_layer, 1); - if (result) - result = -EBUSY; - goto exit; + /* + * It's not safe to update acpi_gbl_trace_method_name without + * having the tracer stopped, so we save the original tracer + * state and disable it. + */ + saved_flags = acpi_gbl_trace_flags; + (void)acpi_debug_trace(NULL, + acpi_gbl_trace_dbg_level, + acpi_gbl_trace_dbg_layer, + 0); + + /* This is a hack. We can't kmalloc in early boot.
*/ + if (is_abs_path) + strcpy(trace_method_name, val); + else { + trace_method_name[0] = '\\'; + strcpy(trace_method_name+1, val); } - result = -EINVAL; -exit: - return result; + /* Restore the original tracer state */ + (void)acpi_debug_trace(trace_method_name, + acpi_gbl_trace_dbg_level, + acpi_gbl_trace_dbg_layer, + saved_flags); + + return 0; +} + +static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp) +{ + return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name); +} + +static const struct kernel_param_ops param_ops_trace_method = { + .set = param_set_trace_method_name, + .get = param_get_trace_method_name, +}; + +static const struct kernel_param_ops param_ops_trace_attrib = { + .set = param_set_uint, + .get = param_get_uint, +}; + +module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644); +module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644); +module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644); + +static int param_set_trace_state(const char *val, struct kernel_param *kp) +{ + acpi_status status; + const char *method = trace_method_name; + u32 flags = 0; + +/* The "xxx-once" strings must be compared before the plain "xxx" ones */ +#define acpi_compare_param(val, key) \ + strncmp((val), (key), sizeof(key) - 1) + + if (!acpi_compare_param(val, "enable")) { + method = NULL; + flags = ACPI_TRACE_ENABLED; + } else if (!acpi_compare_param(val, "disable")) + method = NULL; + else if (!acpi_compare_param(val, "method-once")) + flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT; + else if (!acpi_compare_param(val, "method")) + flags = ACPI_TRACE_ENABLED; + else if (!acpi_compare_param(val, "opcode-once")) + flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE; + else if (!acpi_compare_param(val, "opcode")) + flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE; + else + return -EINVAL; + + status = acpi_debug_trace(method, + acpi_gbl_trace_dbg_level, + acpi_gbl_trace_dbg_layer, + flags); + if (ACPI_FAILURE(status)) + return -EBUSY; + + return 0; } static int param_get_trace_state(char *buffer, struct kernel_param *kp) { - if (!acpi_gbl_trace_method_name) + if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) return sprintf(buffer, "disable"); else { - if (acpi_gbl_trace_flags & 1) - return sprintf(buffer, "1"); - else + if (acpi_gbl_trace_method_name) { + if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) + return sprintf(buffer, "method-once"); + else + return sprintf(buffer, "method"); + } else + return sprintf(buffer, "enable"); } return 0; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 2e19189da..17a6fa01a 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 6d4e44ea7..30d8518b2 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This driver fully implements the ACPI thermal policy as described in the @@ -529,8 +525,7 @@ static void acpi_thermal_check(void *data) /* sys I/F for generic thermal sysfs support */ -static int thermal_get_temp(struct thermal_zone_device *thermal, - unsigned long *temp) +static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) { struct acpi_thermal *tz = thermal->devdata; int result; @@ -637,7 +632,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal, } static int thermal_get_trip_temp(struct thermal_zone_device *thermal, - int trip, unsigned long *temp) + int trip, int *temp) { struct acpi_thermal *tz = thermal->devdata; int i; @@ -690,7 +685,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, } static int thermal_get_crit_temp(struct thermal_zone_device *thermal, - unsigned long *temperature) { + int *temperature) +{ struct acpi_thermal *tz = thermal->devdata; if (tz->trips.critical.flags.valid) { @@ -713,8 +709,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal, return -EINVAL; if (type == THERMAL_TRIP_ACTIVE) { - unsigned long trip_temp; - unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( + int trip_temp; + int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->temperature, tz->kelvin_offset); if (thermal_get_trip_temp(thermal, trip, &trip_temp)) return -EINVAL; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 67c548ad3..475c9079b 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -16,10 +16,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 6607f3c6a..a39e85f9e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2834,7 +2834,7 @@ static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -static struct vm_operations_struct binder_vm_ops = { +static const struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, .close = binder_vma_close, .fault = binder_vm_fault, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b122d497a..d0bacf06a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4761,6 +4761,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) /** * ata_qc_new_init - Request an available ATA command, and initialize it * @dev: Device from whom we request an available command structure + * @tag: tag * * LOCKING: * None. 
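[Editor's note: the thermal.c hunks above belong to a tree-wide conversion of thermal-zone callbacks from "unsigned long *temp" to plain "int *temp", so that sub-zero millicelsius readings can be reported. The driver-side sketch below is illustrative only and not part of this patch; demo_read_millicelsius() and demo_get_temp() are hypothetical names standing in for real hardware access.]

#include <linux/thermal.h>

/* Hypothetical sensor read-out, reporting millicelsius. */
static int demo_read_millicelsius(void *devdata, int *out)
{
	*out = -5000;	/* -5 degrees C; representable only as a signed int */
	return 0;
}

/* Matches the post-patch .get_temp prototype: int *, not unsigned long *. */
static int demo_get_temp(struct thermal_zone_device *thermal, int *temp)
{
	return demo_read_millicelsius(thermal->devdata, temp);
}
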
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 5d9ee99c2..80fe0f6fe 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -834,7 +834,7 @@ static int arasan_cf_probe(struct platform_device *pdev) return -ENOMEM; } - acdev->clk = clk_get(&pdev->dev, NULL); + acdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(acdev->clk)) { dev_warn(&pdev->dev, "Clock not found\n"); return PTR_ERR(acdev->clk); @@ -843,9 +843,8 @@ static int arasan_cf_probe(struct platform_device *pdev) /* allocate host */ host = ata_host_alloc(&pdev->dev, 1); if (!host) { - ret = -ENOMEM; dev_warn(&pdev->dev, "alloc host fail\n"); - goto free_clk; + return -ENOMEM; } ap = host->ports[0]; @@ -894,7 +893,7 @@ static int arasan_cf_probe(struct platform_device *pdev) ret = cf_init(acdev); if (ret) - goto free_clk; + return ret; cf_card_detect(acdev, 0); @@ -904,8 +903,7 @@ static int arasan_cf_probe(struct platform_device *pdev) return 0; cf_exit(acdev); -free_clk: - clk_put(acdev->clk); + return ret; } @@ -916,7 +914,6 @@ static int arasan_cf_remove(struct platform_device *pdev) ata_host_detach(host); cf_exit(acdev); - clk_put(acdev->clk); return 0; } diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 6d08446b8..12fe0f3bb 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -27,12 +27,11 @@ #include #include #include +#include #include #include -#include - #define DRV_NAME "pata-rb532-cf" #define DRV_VERSION "0.1.0" #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index d49a5193b..8804127b1 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -861,10 +861,6 @@ MODULE_DEVICE_TABLE(of, sata_rcar_match); static const struct platform_device_id sata_rcar_id_table[] = { { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ { "sata-r8a7779", RCAR_GEN1_SATA }, - { "sata-r8a7790", RCAR_GEN2_SATA }, - { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA }, - { "sata-r8a7791", RCAR_GEN2_SATA }, - { "sata-r8a7793", RCAR_GEN2_SATA }, { }, }; MODULE_DEVICE_TABLE(platform, sata_rcar_id_table); diff --git a/drivers/atm/he.c b/drivers/atm/he.c index a8da3a50e..0f5cb3763 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev) kfree(he_dev->rbpl_virt); kfree(he_dev->rbpl_table); - - if (he_dev->rbpl_pool) - dma_pool_destroy(he_dev->rbpl_pool); + dma_pool_destroy(he_dev->rbpl_pool); if (he_dev->rbrq_base) dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), @@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev) dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), he_dev->tpdrq_base, he_dev->tpdrq_phys); - if (he_dev->tpd_pool) - dma_pool_destroy(he_dev->tpd_pool); + dma_pool_destroy(he_dev->tpd_pool); if (he_dev->pci_dev) { pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index af5e66a7c..ea62513dc 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -803,7 +803,12 @@ static void solos_bh(unsigned long card_arg) continue; } - skb = alloc_skb(size + 1, GFP_ATOMIC); + /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of + * headroom, and ensures we can route packets back out an + * Ethernet interface (for example) without having to + * reallocate. 
Adding NET_IP_ALIGN also ensures that both + * PPPoATM and PPPoEoBR2684 packets end up aligned. */ + skb = netdev_alloc_skb_ip_align(NULL, size + 1); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); @@ -867,7 +872,10 @@ static void solos_bh(unsigned long card_arg) /* Allocate RX skbs for any ports which need them */ if (card->using_dma && card->atmdev[port] && !card->rx_skb[port]) { - struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); + /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN + * here; the FPGA can only DMA to addresses which are + * aligned to 4 bytes. */ + struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE); if (skb) { SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, skb->data, diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c index 0d752851a..816de9eaa 100644 --- a/drivers/auxdisplay/ks0108.c +++ b/drivers/auxdisplay/ks0108.c @@ -23,6 +23,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -90,17 +92,19 @@ void ks0108_displaystate(unsigned char state) void ks0108_startline(unsigned char startline) { - ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7)); + ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) | + bit(7)); } void ks0108_address(unsigned char address) { - ks0108_writedata(min(address,(unsigned char)63) | bit(6)); + ks0108_writedata(min_t(unsigned char, address, 63) | bit(6)); } void ks0108_page(unsigned char page) { - ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7)); + ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) | + bit(5) | bit(7)); } EXPORT_SYMBOL_GPL(ks0108_writedata); @@ -121,53 +125,71 @@ unsigned char ks0108_isinited(void) } EXPORT_SYMBOL_GPL(ks0108_isinited); -/* - * Module Init & Exit - */ - -static int __init ks0108_init(void) +static void ks0108_parport_attach(struct parport *port) { - int result; - int ret = -EINVAL; - - ks0108_parport = parport_find_base(ks0108_port); - if (ks0108_parport == NULL) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "parport didn't find %i port\n", ks0108_port); - goto none; - } - - ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME, - NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); - parport_put_port(ks0108_parport); - if (ks0108_pardevice == NULL) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "parport didn't register new device\n"); - goto none; + struct pardev_cb ks0108_cb; + + if (port->base != ks0108_port) + return; + + memset(&ks0108_cb, 0, sizeof(ks0108_cb)); + ks0108_cb.flags = PARPORT_DEV_EXCL; + ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME, + &ks0108_cb, 0); + if (!ks0108_pardevice) { + pr_err("ERROR: parport didn't register new device\n"); + return; } - - result = parport_claim(ks0108_pardevice); - if (result != 0) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "can't claim %i parport, maybe in use\n", ks0108_port); - ret = result; - goto registered; + if (parport_claim(ks0108_pardevice)) { + pr_err("could not claim access to parport %i. 
Aborting.\n", + ks0108_port); + goto err_unreg_device; } + ks0108_parport = port; ks0108_inited = 1; - return 0; + return; -registered: +err_unreg_device: parport_unregister_device(ks0108_pardevice); - -none: - return ret; + ks0108_pardevice = NULL; } -static void __exit ks0108_exit(void) +static void ks0108_parport_detach(struct parport *port) { + if (port->base != ks0108_port) + return; + + if (!ks0108_pardevice) { + pr_err("%s: already unregistered.\n", KS0108_NAME); + return; + } + parport_release(ks0108_pardevice); parport_unregister_device(ks0108_pardevice); + ks0108_pardevice = NULL; + ks0108_parport = NULL; +} + +/* + * Module Init & Exit + */ + +static struct parport_driver ks0108_parport_driver = { + .name = "ks0108", + .match_port = ks0108_parport_attach, + .detach = ks0108_parport_detach, + .devmodel = true, +}; + +static int __init ks0108_init(void) +{ + return parport_register_driver(&ks0108_parport_driver); +} + +static void __exit ks0108_exit(void) +{ + parport_unregister_driver(&ks0108_parport_driver); } module_init(ks0108_init); diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 527d29170..6b2a84e7f 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_REGMAP) += regmap/ obj-$(CONFIG_SOC_BUS) += soc.o obj-$(CONFIG_PINCTRL) += pinctrl.o obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o +obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/base.h b/drivers/base/base.h index fd3347d9f..1782f3aa3 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -63,7 +63,7 @@ struct driver_private { * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. - * @device - pointer back to the struct class that this structure is + * @device - pointer back to the struct device that this structure is * associated with. * * Nothing outside of the driver core should ever touch these fields. @@ -134,6 +134,7 @@ extern int devres_release_all(struct device *dev); /* /sys/devices directory */ extern struct kset *devices_kset; +extern void devices_kset_move_last(struct device *dev); #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) extern void module_add_driver(struct module *mod, struct device_driver *drv); diff --git a/drivers/base/core.c b/drivers/base/core.c index dafae6d2f..334ec7ef1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -533,6 +533,52 @@ static DEVICE_ATTR_RO(dev); /* /sys/devices/ */ struct kset *devices_kset; +/** + * devices_kset_move_before - Move device in the devices_kset's list. + * @deva: Device to move. + * @devb: Device @deva should come before. + */ +static void devices_kset_move_before(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s before %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_after - Move device in the devices_kset's list. + * @deva: Device to move + * @devb: Device @deva should come after. 
+ */ +static void devices_kset_move_after(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s after %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_last - move the device to the end of devices_kset's list. + * @dev: device to move + */ +void devices_kset_move_last(struct device *dev) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&dev->kobj.entry, &devices_kset->list); + spin_unlock(&devices_kset->list_lock); +} + /** * device_create_file - create sysfs attribute file for device. * @dev: device. @@ -662,6 +708,9 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->devres_head); device_pm_init(dev); set_dev_node(dev, -1); +#ifdef CONFIG_GENERIC_MSI_IRQ + INIT_LIST_HEAD(&dev->msi_list); +#endif } EXPORT_SYMBOL_GPL(device_initialize); @@ -1252,6 +1301,19 @@ void device_unregister(struct device *dev) } EXPORT_SYMBOL_GPL(device_unregister); +static struct device *prev_device(struct klist_iter *i) +{ + struct klist_node *n = klist_prev(i); + struct device *dev = NULL; + struct device_private *p; + + if (n) { + p = to_device_private_parent(n); + dev = p->device; + } + return dev; +} + static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); @@ -1340,6 +1402,36 @@ int device_for_each_child(struct device *parent, void *data, } EXPORT_SYMBOL_GPL(device_for_each_child); +/** + * device_for_each_child_reverse - device child iterator in reversed order. + * @parent: parent struct device. + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, and call @fn for each, + * passing it @data. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + */ +int device_for_each_child_reverse(struct device *parent, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child_reverse); + /** * device_find_child - device iterator for locating a particular device. 
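A minimal usage sketch for the new reverse iterator above; detach_one() is a hypothetical callback and only illustrates the calling convention (a non-zero return stops the walk):

static int detach_one(struct device *dev, void *data)
{
	dev_dbg(dev, "tearing down child\n");
	return 0;	/* non-zero would abort the iteration */
}

static void teardown_children(struct device *parent)
{
	/* children are visited newest-first, i.e. reverse of probe order */
	device_for_each_child_reverse(parent, NULL, detach_one);
}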
* @parent: parent struct device @@ -1923,12 +2015,15 @@ int device_move(struct device *dev, struct device *new_parent, break; case DPM_ORDER_DEV_AFTER_PARENT: device_pm_move_after(dev, new_parent); + devices_kset_move_after(dev, new_parent); break; case DPM_ORDER_PARENT_BEFORE_DEV: device_pm_move_before(new_parent, dev); + devices_kset_move_before(new_parent, dev); break; case DPM_ORDER_DEV_LAST: device_pm_move_last(dev); + devices_kset_move_last(dev); break; } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 78720e706..91bbb1959 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -41,7 +41,7 @@ static void change_cpu_under_node(struct cpu *cpu, cpu->node_id = to_nid; } -static int __ref cpu_subsys_online(struct device *dev) +static int cpu_subsys_online(struct device *dev) { struct cpu *cpu = container_of(dev, struct cpu, dev); int cpuid = dev->id; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a638bbb1a..be0eb4639 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -304,6 +304,14 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto probe_failed; } + /* + * Ensure devices are listed in devices_kset in correct order + * It's important to move Dev to the end of devices_kset before + * calling .probe, because it could be recursive and parent Dev + * should always go first + */ + devices_kset_move_last(dev); + if (dev->bus->probe) { ret = dev->bus->probe(dev); if (ret) @@ -399,6 +407,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); * * This function must be called with @dev lock held. When called for a * USB interface, @dev->parent lock must be held as well. + * + * If the device has a parent, runtime-resume the parent before driver probing. */ int driver_probe_device(struct device_driver *drv, struct device *dev) { @@ -410,10 +420,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) pr_debug("bus: '%s': %s: matched device %s with driver %s\n", drv->bus->name, __func__, dev_name(dev), drv->name); + if (dev->parent) + pm_runtime_get_sync(dev->parent); + pm_runtime_barrier(dev); ret = really_probe(dev, drv); pm_request_idle(dev); + if (dev->parent) + pm_runtime_put(dev->parent); + return ret; } @@ -507,11 +523,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + if (dev->parent) + pm_runtime_get_sync(dev->parent); + bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); dev_dbg(dev, "async probe completed\n"); pm_request_idle(dev); + if (dev->parent) + pm_runtime_put(dev->parent); + device_unlock(dev); put_device(dev); @@ -541,6 +563,9 @@ static int __device_attach(struct device *dev, bool allow_async) .want_async = false, }; + if (dev->parent) + pm_runtime_get_sync(dev->parent); + ret = bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); if (!ret && allow_async && data.have_async) { @@ -557,6 +582,9 @@ static int __device_attach(struct device *dev, bool allow_async) } else { pm_request_idle(dev); } + + if (dev->parent) + pm_runtime_put(dev->parent); } out_unlock: device_unlock(dev); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 950fff9ce..a12ff9863 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, * global one. Requires architecture specific dev_get_cma_area() helper * function. 
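The pm_runtime changes to dd.c above all follow the same bracket; a condensed sketch of the pattern, assuming the existing really_probe() path:

static int probe_with_powered_parent(struct device *dev,
				     struct device_driver *drv)
{
	int ret;

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);	/* resume the parent first */

	ret = really_probe(dev, drv);

	if (dev->parent)
		pm_runtime_put(dev->parent);	/* let the parent suspend again */

	return ret;
}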
*/ -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int align) { if (align > CONFIG_CMA_ALIGNMENT) diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index ac1216e59..d6e69e89f 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -443,7 +443,7 @@ static int fw_add_devm_name(struct device *dev, const char *name) return -ENOMEM; fwn->name = kstrdup_const(name, GFP_KERNEL); if (!fwn->name) { - kfree(fwn); + devres_free(fwn); return -ENOMEM; } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c new file mode 100644 index 000000000..134483daa --- /dev/null +++ b/drivers/base/platform-msi.c @@ -0,0 +1,270 @@ +/* + * MSI framework for platform devices + * + * Copyright (C) 2015 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/slab.h> + +#define DEV_ID_SHIFT 24 + +/* + * Internal data structure containing a (made up, but unique) devid + * and the callback to write the MSI message. + */ +struct platform_msi_priv_data { + irq_write_msi_msg_t write_msg; + int devid; +}; + +/* The devid allocator */ +static DEFINE_IDA(platform_msi_devid_ida); + +#ifdef GENERIC_MSI_DOMAIN_OPS +/* + * Convert an msi_desc to a globally unique identifier (per-device + * devid + msi_desc position in the msi_list). 
+ */ +static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) +{ + u32 devid; + + devid = desc->platform.msi_priv_data->devid; + + return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; +} + +static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = platform_msi_calc_hwirq(desc); +} + +static int platform_msi_init(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + info->chip, info->chip_data); +} +#else +#define platform_msi_set_desc NULL +#define platform_msi_init NULL +#endif + +static void platform_msi_update_dom_ops(struct msi_domain_info *info) +{ + struct msi_domain_ops *ops = info->ops; + + BUG_ON(!ops); + + if (ops->msi_init == NULL) + ops->msi_init = platform_msi_init; + if (ops->set_desc == NULL) + ops->set_desc = platform_msi_set_desc; +} + +static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *desc = irq_data_get_msi_desc(data); + struct platform_msi_priv_data *priv_data; + + priv_data = desc->platform.msi_priv_data; + + priv_data->write_msg(desc, msg); +} + +static void platform_msi_update_chip_ops(struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + BUG_ON(!chip); + if (!chip->irq_mask) + chip->irq_mask = irq_chip_mask_parent; + if (!chip->irq_unmask) + chip->irq_unmask = irq_chip_unmask_parent; + if (!chip->irq_eoi) + chip->irq_eoi = irq_chip_eoi_parent; + if (!chip->irq_set_affinity) + chip->irq_set_affinity = msi_domain_set_affinity; + if (!chip->irq_write_msi_msg) + chip->irq_write_msi_msg = platform_msi_write_msg; +} + +static void platform_msi_free_descs(struct device *dev) +{ + struct msi_desc *desc, *tmp; + + list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { + list_del(&desc->list); + free_msi_entry(desc); + } +} + +static int platform_msi_alloc_descs(struct device *dev, int nvec, + struct platform_msi_priv_data *data) + +{ + int i; + + for (i = 0; i < nvec; i++) { + struct msi_desc *desc; + + desc = alloc_msi_entry(dev); + if (!desc) + break; + + desc->platform.msi_priv_data = data; + desc->platform.msi_index = i; + desc->nvec_used = 1; + + list_add_tail(&desc->list, dev_to_msi_list(dev)); + } + + if (i != nvec) { + /* Clean up the mess */ + platform_msi_free_descs(dev); + + return -ENOMEM; + } + + return 0; +} + +/** + * platform_msi_create_irq_domain - Create a platform MSI interrupt domain + * @np: Optional device-tree node of the interrupt controller + * @info: MSI domain info + * @parent: Parent irq domain + * + * Updates the domain and chip ops and creates a platform MSI + * interrupt domain. + * + * Returns: + * A domain pointer or NULL in case of failure. 
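A worked example of the hwirq encoding above, with made-up numbers:

/*
 * With DEV_ID_SHIFT == 24 the devid lands in bits [31:8] and the
 * per-device MSI index in bits [7:0], which is where the 256-vector
 * limit enforced in platform_msi_domain_alloc_irqs() comes from.
 *
 *	devid = 5, msi_index = 3
 *	hwirq = (5 << (32 - 24)) | 3 = (5 << 8) | 3 = 0x503
 */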
+ */ +struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, + struct msi_domain_info *info, + struct irq_domain *parent) +{ + struct irq_domain *domain; + + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) + platform_msi_update_dom_ops(info); + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + platform_msi_update_chip_ops(info); + + domain = msi_create_irq_domain(np, info, parent); + if (domain) + domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; + + return domain; +} + +/** + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + */ +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *priv_data; + int err; + + /* + * Limit the number of interrupts to 256 per device. Should we + * need to bump this up, DEV_ID_SHIFT should be adjusted + * accordingly (which would impact the max number of MSI + * capable devices). + */ + if (!dev->msi_domain || !write_msi_msg || !nvec || + nvec > (1 << (32 - DEV_ID_SHIFT))) + return -EINVAL; + + if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { + dev_err(dev, "Incompatible msi_domain, giving up\n"); + return -EINVAL; + } + + /* Already had a helping of MSI? Greed... */ + if (!list_empty(dev_to_msi_list(dev))) + return -EBUSY; + + priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); + if (!priv_data) + return -ENOMEM; + + priv_data->devid = ida_simple_get(&platform_msi_devid_ida, + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + if (priv_data->devid < 0) { + err = priv_data->devid; + goto out_free_data; + } + + priv_data->write_msg = write_msi_msg; + + err = platform_msi_alloc_descs(dev, nvec, priv_data); + if (err) + goto out_free_id; + + err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); + if (err) + goto out_free_desc; + + return 0; + +out_free_desc: + platform_msi_free_descs(dev); +out_free_id: + ida_simple_remove(&platform_msi_devid_ida, priv_data->devid); +out_free_data: + kfree(priv_data); + + return err; +} + +/** + * platform_msi_domain_free_irqs - Free MSI interrupts for @dev + * @dev: The device for which to free interrupts + */ +void platform_msi_domain_free_irqs(struct device *dev) +{ + struct msi_desc *desc; + + desc = first_msi_entry(dev); + if (desc) { + struct platform_msi_priv_data *data; + + data = desc->platform.msi_priv_data; + + ida_simple_remove(&platform_msi_devid_ida, data->devid); + kfree(data); + } + + msi_domain_free_irqs(dev->msi_domain, dev); + platform_msi_free_descs(dev); +} diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0ee43c105..16550c63d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) stop_latency_ns, "stop"); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, + bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, start, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, start_latency_ns, "start"); } @@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_acquire_lock(struct generic_pm_domain 
*genpd) -{ - DEFINE_WAIT(wait); - - mutex_lock(&genpd->lock); - /* - * Wait for the domain to transition into either the active, - * or the power off state. - */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status == GPD_STATE_ACTIVE - || genpd->status == GPD_STATE_POWER_OFF) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ - mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ - if (genpd->resume_count == 0) - genpd->status = GPD_STATE_ACTIVE; -} - static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) { s64 usecs64; @@ -243,6 +212,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) return ret; } +/** + * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * @genpd: PM domain to power off. + * + * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * before. + */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + queue_work(pm_wq, &genpd->power_off_work); +} + /** * __pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. @@ -251,35 +232,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) * resume a device belonging to it. */ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; - DEFINE_WAIT(wait); int ret = 0; - /* If the domain's master is being waited for, we have to wait too. */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_WAIT_MASTER) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->status != GPD_STATE_POWER_OFF) { - genpd_set_active(genpd); - return 0; - } - if (genpd->cpuidle_data) { cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = true; @@ -294,20 +254,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - genpd->status = GPD_STATE_WAIT_MASTER; - - mutex_unlock(&genpd->lock); ret = pm_genpd_poweron(link->master); - - mutex_lock(&genpd->lock); - - /* - * The "wait for parent" status is guaranteed not to change - * while the master is powering on. - */ - genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); if (ret) { genpd_sd_counter_dec(link->master); goto err; @@ -319,13 +267,16 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) goto err; out: - genpd_set_active(genpd); - + genpd->status = GPD_STATE_ACTIVE; return 0; err: - list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) + list_for_each_entry_continue_reverse(link, + &genpd->slave_links, + slave_node) { genpd_sd_counter_dec(link->master); + genpd_queue_power_off_work(link->master); + } return ret; } @@ -356,20 +307,18 @@ int pm_genpd_name_poweron(const char *domain_name) return genpd ? 
pm_genpd_poweron(genpd) : -EINVAL; } -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, save_state_latency_ns, "state save"); } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, + struct device *dev, bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, restore_state_latency_ns, "state restore"); @@ -415,134 +364,31 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, return NOTIFY_DONE; } -/** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int ret = 0; - - if (gpd_data->need_restore > 0) - return 0; - - /* - * If the value of the need_restore flag is still unknown at this point, - * we trust that pm_genpd_poweroff() has verified that the device is - * already runtime PM suspended. - */ - if (gpd_data->need_restore < 0) { - gpd_data->need_restore = 1; - return 0; - } - - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); - - mutex_lock(&genpd->lock); - - if (!ret) - gpd_data->need_restore = 1; - - return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. - */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int need_restore = gpd_data->need_restore; - - gpd_data->need_restore = 0; - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - - /* - * Call genpd_restore_dev() for recently added devices too (need_restore - * is negative then). - */ - if (need_restore) - genpd_restore_dev(genpd, dev); - - mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ - return genpd->status == GPD_STATE_WAIT_MASTER - || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - -/** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). - * @genpd: PM domait to power off. - * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done - * before. 
- */ -static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) -{ - queue_work(pm_wq, &genpd->power_off_work); -} - /** * pm_genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. * * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd. */ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct pm_domain_data *pdd; struct gpd_link *link; - unsigned int not_suspended; - int ret = 0; + unsigned int not_suspended = 0; - start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. - * (2) The domain is waiting for its master to power up. - * (3) One of the domain's devices is being resumed right now. - * (4) System suspend is in progress. + * (2) System suspend is in progress. */ if (genpd->status == GPD_STATE_POWER_OFF - || genpd->status == GPD_STATE_WAIT_MASTER - || genpd->resume_count > 0 || genpd->prepared_count > 0) + || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; - not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -560,41 +406,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (not_suspended > genpd->in_progress) return -EBUSY; - if (genpd->poweroff_task) { - /* - * Another instance of pm_genpd_poweroff() is executing - * callbacks, so tell it to start over and return. - */ - genpd->status = GPD_STATE_REPEAT; - return 0; - } - if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } - genpd->status = GPD_STATE_BUSY; - genpd->poweroff_task = current; - - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - ret = atomic_read(&genpd->sd_count) == 0 ? - __pm_genpd_save_device(pdd, genpd) : -EBUSY; - - if (genpd_abort_poweroff(genpd)) - goto out; - - if (ret) { - genpd_set_active(genpd); - goto out; - } - - if (genpd->status == GPD_STATE_REPEAT) { - genpd->poweroff_task = NULL; - goto start; - } - } - if (genpd->cpuidle_data) { /* * If cpuidle_data is set, cpuidle should turn the domain off @@ -607,14 +423,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = false; cpuidle_resume_and_unlock(); - goto out; + return 0; } if (genpd->power_off) { - if (atomic_read(&genpd->sd_count) > 0) { - ret = -EBUSY; - goto out; - } + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; /* * If sd_count > 0 at this point, one of the subdomains hasn't @@ -625,10 +441,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) * happen very often). 
*/ ret = genpd_power_off(genpd, true); - if (ret == -EBUSY) { - genpd_set_active(genpd); - goto out; - } + if (ret) + return ret; } genpd->status = GPD_STATE_POWER_OFF; @@ -638,10 +452,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_queue_power_off_work(link->master); } - out: - genpd->poweroff_task = NULL; - wake_up_all(&genpd->status_wait_queue); - return ret; + return 0; } /** @@ -654,9 +465,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); pm_genpd_poweroff(genpd); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); } /** @@ -670,7 +481,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); int ret; @@ -684,10 +494,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (stop_ok && !stop_ok(dev)) return -EBUSY; - ret = genpd_stop_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; + ret = genpd_stop_dev(genpd, dev); + if (ret) { + genpd_restore_dev(genpd, dev, true); + return ret; + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. @@ -696,16 +512,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - - /* - * If we have an unknown state of the need_restore flag, it means none - * of the runtime PM callbacks has been invoked yet. Let's update the - * flag to reflect that the current state is active. - */ - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - if (gpd_data->need_restore < 0) - gpd_data->need_restore = 0; - genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; @@ -725,8 +531,8 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - DEFINE_WAIT(wait); int ret; + bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -735,39 +541,21 @@ static int pm_genpd_runtime_resume(struct device *dev) return -EINVAL; /* If power.irq_safe, the PM domain is never powered off. */ - if (dev->power.irq_safe) - return genpd_start_dev_no_timing(genpd, dev); + if (dev->power.irq_safe) { + timed = false; + goto out; + } mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); - if (ret) { - mutex_unlock(&genpd->lock); - return ret; - } - genpd->status = GPD_STATE_BUSY; - genpd->resume_count++; - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - /* - * If current is the powering off task, we have been called - * reentrantly from one of the device callbacks, so we should - * not wait. 
- */ - if (!genpd->poweroff_task || genpd->poweroff_task == current) - break; - mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->lock); - schedule(); + if (ret) + return ret; - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); - genpd->resume_count--; - genpd_set_active(genpd); - wake_up_all(&genpd->status_wait_queue); - mutex_unlock(&genpd->lock); + out: + genpd_start_dev(genpd, dev, timed); + genpd_restore_dev(genpd, dev, timed); return 0; } @@ -883,7 +671,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, { struct gpd_link *link; - if (genpd->status != GPD_STATE_POWER_OFF) + if (genpd->status == GPD_STATE_ACTIVE) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { @@ -960,14 +748,14 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); @@ -1102,7 +890,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1230,7 +1018,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true); } /** @@ -1324,7 +1112,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1440,7 +1228,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, gpd_data->td = *td; gpd_data->base.dev = dev; - gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1502,7 +1289,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1519,7 +1306,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1563,7 +1350,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1578,14 +1365,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); genpd_free_dev_data(dev, gpd_data); return 0; out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1606,17 +1393,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, || genpd == subdomain) return -EINVAL; - start: - genpd_acquire_lock(genpd); + 
mutex_lock(&genpd->lock); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; @@ -1644,7 +1423,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1692,8 +1471,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); + + if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { + pr_warn("%s: unable to remove subdomain %s\n", genpd->name, + subdomain->name); + ret = -EBUSY; + goto out; + } list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) @@ -1701,13 +1486,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); @@ -1720,7 +1498,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - genpd_release_lock(genpd); +out: + mutex_unlock(&genpd->lock); return ret; } @@ -1744,7 +1523,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) if (IS_ERR_OR_NULL(genpd) || state < 0) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->cpuidle_data) { ret = -EEXIST; @@ -1775,7 +1554,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) genpd_recalc_cpu_exit_latency(genpd); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; err: @@ -1812,7 +1591,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); cpuidle_data = genpd->cpuidle_data; if (!cpuidle_data) { @@ -1830,7 +1609,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) kfree(cpuidle_data); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1912,9 +1691,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; - init_waitqueue_head(&genpd->status_wait_queue); - genpd->poweroff_task = NULL; - genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; @@ -1952,6 +1728,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); } +EXPORT_SYMBOL_GPL(pm_genpd_init); #ifdef CONFIG_PM_GENERIC_DOMAINS_OF /* @@ -2125,7 +1902,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); /** * genpd_dev_pm_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach. 
* @power_off: Currently not used * * Try to locate a corresponding generic PM domain, which the device was @@ -2183,7 +1960,10 @@ static void genpd_dev_pm_sync(struct device *dev) * Both generic and legacy Samsung-specific DT bindings are supported to keep * backwards compatibility with existing DTBs. * - * Returns 0 on successfully attached PM domain or negative error code. + * Returns 0 on successfully attached PM domain or negative error code. Note + * that if a power-domain exists for the device, but it cannot be found or + * turned on, then return -EPROBE_DEFER to ensure that the device is not + * probed and to re-try again later. */ int genpd_dev_pm_attach(struct device *dev) { @@ -2220,7 +2000,7 @@ int genpd_dev_pm_attach(struct device *dev) dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); of_node_put(dev->of_node); - return PTR_ERR(pd); + return -EPROBE_DEFER; } dev_dbg(dev, "adding to PM domain %s\n", pd->name); @@ -2238,14 +2018,15 @@ int genpd_dev_pm_attach(struct device *dev) dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); of_node_put(dev->of_node); - return ret; + goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - pm_genpd_poweron(pd); + ret = pm_genpd_poweron(pd); - return 0; +out: + return ret ? -EPROBE_DEFER : 0; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -2293,9 +2074,6 @@ static int pm_genpd_summary_one(struct seq_file *s, { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", - [GPD_STATE_WAIT_MASTER] = "wait-master", - [GPD_STATE_BUSY] = "busy", - [GPD_STATE_REPEAT] = "off-in-progress", [GPD_STATE_POWER_OFF] = "off" }; struct pm_domain_data *pm_data; @@ -2309,7 +2087,7 @@ static int pm_genpd_summary_one(struct seq_file *s, if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) goto exit; - seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); /* * Modifications on the list require holding locks on both @@ -2344,8 +2122,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, " domain status slaves\n"); - seq_puts(s, " /device runtime status\n"); + seq_puts(s, "domain status slaves\n"); + seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 2a4154a09..85e17bacc 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->start_latency_ns; + constraint_ns -= td->save_state_latency_ns + + td->stop_latency_ns + + td->start_latency_ns + + td->restore_state_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; - td->cached_stop_ok = constraint_ns > td->stop_latency_ns || - constraint_ns == 0; + td->cached_stop_ok = constraint_ns >= 0; + /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. 
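A worked example, with made-up latencies, of the revised default_stop_ok() accounting above: the QoS constraint must now cover the whole save/stop/start/restore round trip rather than just the start latency.

/*
 *	constraint_ns             = 100000
 *	save_state_latency_ns     =  20000
 *	stop_latency_ns           =   5000
 *	start_latency_ns          =  10000
 *	restore_state_latency_ns  =  15000
 *
 *	100000 - (20000 + 5000 + 10000 + 15000) = 50000 >= 0  -> stop_ok
 *	a 40000 ns constraint would leave -10000  -> not stop_ok
 */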
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. - */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } min_off_time_ns = -1; /* @@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) * constraint_ns cannot be negative here, because the device has * been suspended. */ - constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 30b7bbfdc..1710c26ba 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_suspended_if_enabled(dev)) + if (pm_runtime_status_suspended(dev)) goto Complete; pm_runtime_enable(dev); diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 677fb2843..7ae7cd990 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -51,10 +52,17 @@ * order. * @dynamic: not created from static DT entries. * @available: true/false - marks whether this OPP is available or not + * @turbo: true if turbo (boost) OPP * @rate: Frequency in hertz - * @u_volt: Nominal voltage in microvolts corresponding to this OPP + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's + * frequency from any other OPP's frequency. * @dev_opp: points back to the device_opp struct this opp belongs to * @rcu_head: RCU callback head used for deferred freeing + * @np: OPP's device node. * * This structure stores the OPP information for a given device. */ @@ -63,11 +71,34 @@ struct dev_pm_opp { bool available; bool dynamic; + bool turbo; unsigned long rate; + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; + unsigned long clock_latency_ns; struct device_opp *dev_opp; struct rcu_head rcu_head; + + struct device_node *np; +}; + +/** + * struct device_list_opp - devices managed by 'struct device_opp' + * @node: list node + * @dev: device to which the struct object belongs + * @rcu_head: RCU callback head used for deferred freeing + * + * This is an internal data structure maintaining the list of devices that are + * managed by 'struct device_opp'. + */ +struct device_list_opp { + struct list_head node; + const struct device *dev; + struct rcu_head rcu_head; }; /** @@ -77,10 +108,12 @@ struct dev_pm_opp { * list. * RCU usage: nodes are not modified in the list of device_opp, * however addition is possible and is secured by dev_opp_list_lock - * @dev: device pointer * @srcu_head: notifier head to notify the OPP availability changes. 
* @rcu_head: RCU callback head used for deferred freeing + * @dev_list: list of devices that share these OPPs * @opp_list: list of opps + * @np: struct device_node pointer for opp's DT node. + * @shared_opp: OPP is shared between multiple devices. * * This is an internal data structure maintaining the link to opps attached to * a device. This structure is not meant to be shared to users as it is @@ -93,10 +126,15 @@ struct dev_pm_opp { struct device_opp { struct list_head node; - struct device *dev; struct srcu_notifier_head srcu_head; struct rcu_head rcu_head; + struct list_head dev_list; struct list_head opp_list; + + struct device_node *np; + unsigned long clock_latency_ns_max; + bool shared_opp; + struct dev_pm_opp *suspend_opp; }; /* @@ -110,12 +148,44 @@ static DEFINE_MUTEX(dev_opp_list_lock); #define opp_rcu_lockdep_assert() \ do { \ - rcu_lockdep_assert(rcu_read_lock_held() || \ - lockdep_is_held(&dev_opp_list_lock), \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&dev_opp_list_lock), \ "Missing rcu_read_lock() or " \ "dev_opp_list_lock protection"); \ } while (0) +static struct device_list_opp *_find_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_for_each_entry(list_dev, &dev_opp->dev_list, node) + if (list_dev->dev == dev) + return list_dev; + + return NULL; +} + +static struct device_opp *_managed_opp(const struct device_node *np) +{ + struct device_opp *dev_opp; + + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { + if (dev_opp->np == np) { + /* + * Multiple devices can point to the same OPP table and + * so will have same node-pointer, np. + * + * But the OPPs will be considered as shared only if the + * OPP table contains a "opp-shared" property. + */ + return dev_opp->shared_opp ? 
dev_opp : NULL; + } + } + + return NULL; +} + /** * _find_device_opp() - find device_opp struct using device pointer * @dev: device pointer used to lookup device OPPs @@ -132,21 +202,18 @@ do { \ */ static struct device_opp *_find_device_opp(struct device *dev) { - struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + struct device_opp *dev_opp; - if (unlikely(IS_ERR_OR_NULL(dev))) { + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { - if (tmp_dev_opp->dev == dev) { - dev_opp = tmp_dev_opp; - break; - } - } + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) + if (_find_list_dev(dev, dev_opp)) + return dev_opp; - return dev_opp; + return ERR_PTR(-ENODEV); } /** @@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) pr_err("%s: Invalid parameters\n", __func__); else v = tmp_opp->u_volt; @@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) pr_err("%s: Invalid parameters\n", __func__); else f = tmp_opp->rate; @@ -213,6 +280,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +/** + * dev_pm_opp_is_turbo() - Returns whether opp is a turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short durations of time to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is an RCU + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return false; + } + + return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). 
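A minimal caller sketch for dev_pm_opp_is_turbo(); freq_is_turbo() is illustrative, and the find/query pair must stay inside a single RCU read-side section:

static bool freq_is_turbo(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	bool turbo = false;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (!IS_ERR(opp))
		turbo = dev_pm_opp_is_turbo(opp);
	rcu_read_unlock();

	return turbo;
}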
+ */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + struct device_opp *dev_opp; + unsigned long clock_latency_ns; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + clock_latency_ns = 0; + else + clock_latency_ns = dev_opp->clock_latency_ns_max; + + rcu_read_unlock(); + return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + +/** + * dev_pm_opp_get_suspend_opp() - Get suspend opp + * @dev: device for which we do this operation + * + * Return: This function returns pointer to the suspend opp if it is + * defined and available, otherwise it returns NULL. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) +{ + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || + !dev_opp->suspend_opp->available) + return NULL; + + return dev_opp->suspend_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); + /** * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list * @dev: device for which we do this operation @@ -407,18 +562,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); +/* List-dev Helpers */ +static void _kfree_list_dev_rcu(struct rcu_head *head) +{ + struct device_list_opp *list_dev; + + list_dev = container_of(head, struct device_list_opp, rcu_head); + kfree_rcu(list_dev, rcu_head); +} + +static void _remove_list_dev(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + list_del(&list_dev->node); + call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, + _kfree_list_dev_rcu); +} + +static struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); + if (!list_dev) + return NULL; + + /* Initialize list-dev */ + list_dev->dev = dev; + list_add_rcu(&list_dev->node, &dev_opp->dev_list); + + return list_dev; +} + /** - * _add_device_opp() - Allocate a new device OPP table + * _add_device_opp() - Find device OPP table or allocate a new one * @dev: device for which we do this operation * - * New device node which uses OPPs - used when multiple devices with OPP tables - * are maintained. + * It tries to find an existing table first, if it couldn't find one, it + * allocates a new OPP table and returns that. * * Return: valid device_opp pointer if success, else NULL. */ static struct device_opp *_add_device_opp(struct device *dev) { struct device_opp *dev_opp; + struct device_list_opp *list_dev; + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (!IS_ERR(dev_opp)) + return dev_opp; /* * Allocate a new device OPP table. 
In the infrequent case where a new @@ -428,7 +622,14 @@ static struct device_opp *_add_device_opp(struct device *dev) if (!dev_opp) return NULL; - dev_opp->dev = dev; + INIT_LIST_HEAD(&dev_opp->dev_list); + + list_dev = _add_list_dev(dev, dev_opp); + if (!list_dev) { + kfree(dev_opp); + return NULL; + } + srcu_init_notifier_head(&dev_opp->srcu_head); INIT_LIST_HEAD(&dev_opp->opp_list); @@ -438,136 +639,41 @@ static struct device_opp *_add_device_opp(struct device *dev) } /** - * _opp_add_dynamic() - Allocate a dynamic OPP. - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP - * @dynamic: Dynamically added OPPs. - * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. - * - * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and - * freed by of_free_opp_table. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure + * _kfree_device_rcu() - Free device_opp RCU handler + * @head: RCU head */ -static int _opp_add_dynamic(struct device *dev, unsigned long freq, - long u_volt, bool dynamic) +static void _kfree_device_rcu(struct rcu_head *head) { - struct device_opp *dev_opp = NULL; - struct dev_pm_opp *opp, *new_opp; - struct list_head *head; - int ret; - - /* allocate new OPP node */ - new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); - if (!new_opp) - return -ENOMEM; - - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); - - /* populate the opp table */ - new_opp->rate = freq; - new_opp->u_volt = u_volt; - new_opp->available = true; - new_opp->dynamic = dynamic; - - /* Check for existing list for 'dev' */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - dev_opp = _add_device_opp(dev); - if (!dev_opp) { - ret = -ENOMEM; - goto free_opp; - } - - head = &dev_opp->opp_list; - goto list_add; - } - - /* - * Insert new OPP in order of increasing frequency - * and discard if already present - */ - head = &dev_opp->opp_list; - list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { - if (new_opp->rate <= opp->rate) - break; - else - head = &opp->node; - } - - /* Duplicate OPPs ? */ - if (new_opp->rate == opp->rate) { - ret = opp->available && new_opp->u_volt == opp->u_volt ? - 0 : -EEXIST; - - dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", - __func__, opp->rate, opp->u_volt, opp->available, - new_opp->rate, new_opp->u_volt, new_opp->available); - goto free_opp; - } - -list_add: - new_opp->dev_opp = dev_opp; - list_add_rcu(&new_opp->node, head); - mutex_unlock(&dev_opp_list_lock); - - /* - * Notify the changes in the availability of the operable - * frequency/voltage list. 
- */ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); - return 0; + struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); -free_opp: - mutex_unlock(&dev_opp_list_lock); - kfree(new_opp); - return ret; + kfree_rcu(device_opp, rcu_head); } /** - * dev_pm_opp_add() - Add an OPP table from a table definitions - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP - * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. + * _remove_device_opp() - Removes a device OPP table + * @dev_opp: device OPP table to be removed. * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure + * Removes/frees device OPP table if it doesn't contain any OPPs. */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +static void _remove_device_opp(struct device_opp *dev_opp) { - return _opp_add_dynamic(dev, freq, u_volt, true); + struct device_list_opp *list_dev; + + if (!list_empty(&dev_opp->opp_list)) + return; + + list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, + node); + + _remove_list_dev(list_dev, dev_opp); + + /* dev_list must be empty now */ + WARN_ON(!list_empty(&dev_opp->dev_list)); + + list_del_rcu(&dev_opp->node); + call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, + _kfree_device_rcu); } -EXPORT_SYMBOL_GPL(dev_pm_opp_add); /** * _kfree_opp_rcu() - Free OPP RCU handler @@ -580,21 +686,11 @@ static void _kfree_opp_rcu(struct rcu_head *head) kfree_rcu(opp, rcu_head); } -/** - * _kfree_device_rcu() - Free device_opp RCU handler - * @head: RCU head - */ -static void _kfree_device_rcu(struct rcu_head *head) -{ - struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); - - kfree_rcu(device_opp, rcu_head); -} - /** * _opp_remove() - Remove an OPP from a table definition * @dev_opp: points back to the device_opp struct this opp belongs to * @opp: pointer to the OPP to remove + * @notify: OPP_EVENT_REMOVE notification should be sent or not * * This function removes an opp definition from the opp list. * @@ -603,21 +699,18 @@ static void _kfree_device_rcu(struct rcu_head *head) static void _opp_remove(struct device_opp *dev_opp, - struct dev_pm_opp *opp) + struct dev_pm_opp *opp, bool notify) { /* * Notify the changes in the availability of the operable * frequency/voltage list. 
*/ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); + if (notify) + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); list_del_rcu(&opp->node); call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); - if (list_empty(&dev_opp->opp_list)) { - list_del_rcu(&dev_opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, - _kfree_device_rcu); - } + _remove_device_opp(dev_opp); } /** @@ -659,32 +752,334 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) goto unlock; } - _opp_remove(dev_opp, opp); + _opp_remove(dev_opp, opp, true); unlock: mutex_unlock(&dev_opp_list_lock); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); -/** - * _opp_set_availability() - helper to set the availability of an opp - * @dev: device for which we do this operation - * @freq: OPP frequency to modify availability - * @availability_req: availability status requested for this opp - * - * Set the availability of an OPP with an RCU operation, opp_{enable,disable} - * share a common logic which is isolated here. - * - * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was - * successful. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks to - * keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex locking or synchronize_rcu() blocking calls cannot be used. - */ -static int _opp_set_availability(struct device *dev, unsigned long freq, +static struct dev_pm_opp *_allocate_opp(struct device *dev, + struct device_opp **dev_opp) +{ + struct dev_pm_opp *opp; + + /* allocate new OPP node */ + opp = kzalloc(sizeof(*opp), GFP_KERNEL); + if (!opp) + return NULL; + + INIT_LIST_HEAD(&opp->node); + + *dev_opp = _add_device_opp(dev); + if (!*dev_opp) { + kfree(opp); + return NULL; + } + + return opp; +} + +static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, + struct device_opp *dev_opp) +{ + struct dev_pm_opp *opp; + struct list_head *head = &dev_opp->opp_list; + + /* + * Insert new OPP in order of increasing frequency and discard if + * already present. + * + * Need to use &dev_opp->opp_list in the condition part of the 'for' + * loop, don't replace it with head otherwise it will become an infinite + * loop. + */ + list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + if (new_opp->rate > opp->rate) { + head = &opp->node; + continue; + } + + if (new_opp->rate < opp->rate) + break; + + /* Duplicate OPPs */ + dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", + __func__, opp->rate, opp->u_volt, opp->available, + new_opp->rate, new_opp->u_volt, new_opp->available); + + return opp->available && new_opp->u_volt == opp->u_volt ? + 0 : -EEXIST; + } + + new_opp->dev_opp = dev_opp; + list_add_rcu(&new_opp->node, head); + + return 0; +} + +/** + * _opp_add_dynamic() - Allocate a dynamic OPP. + * @dev: device for which we do this operation + * @freq: Frequency in Hz for this OPP + * @u_volt: Voltage in uVolts for this OPP + * @dynamic: Dynamically added OPPs. + * + * This function adds an opp definition to the opp list and returns status. 
+ * The opp is made available by default and it can be controlled using + * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. + * + * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and + * freed by of_free_opp_table. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + * + * Return: + * 0 On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM Memory allocation failure + */ +static int _opp_add_dynamic(struct device *dev, unsigned long freq, + long u_volt, bool dynamic) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *new_opp; + int ret; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + new_opp = _allocate_opp(dev, &dev_opp); + if (!new_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* populate the opp table */ + new_opp->rate = freq; + new_opp->u_volt = u_volt; + new_opp->available = true; + new_opp->dynamic = dynamic; + + ret = _opp_add(dev, new_opp, dev_opp); + if (ret) + goto free_opp; + + mutex_unlock(&dev_opp_list_lock); + + /* + * Notify the changes in the availability of the operable + * frequency/voltage list. + */ + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + return 0; + +free_opp: + _opp_remove(dev_opp, new_opp, false); +unlock: + mutex_unlock(&dev_opp_list_lock); + return ret; +} + +/* TODO: Support multiple regulators */ +static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) +{ + u32 microvolt[3] = {0}; + int count, ret; + + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) + return 0; + + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + + /* There can be one or three elements here */ + if (count != 1 && count != 3) { + dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", + __func__, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, + count); + if (ret) { + dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, + ret); + return -EINVAL; + } + + opp->u_volt = microvolt[0]; + opp->u_volt_min = microvolt[1]; + opp->u_volt_max = microvolt[2]; + + return 0; +} + +/** + * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) + * @dev: device for which we do this operation + * @np: device node + * + * This function adds an opp definition to the opp list and returns status. The + * opp can be controlled using dev_pm_opp_enable/disable functions and may be + * removed by dev_pm_opp_remove. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
+ }
+
+ /*
+ * Rate is defined as an unsigned long in the clk API, so cast
+ * explicitly to its type. Must be fixed once rate is guaranteed
+ * to be 64 bit in the clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_get_microvolt(new_opp, dev);
+ if (ret)
+ goto free_opp;
+
+ if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
+ new_opp->u_amp = val;
+
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (dev_opp->suspend_opp)
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, dev_opp->suspend_opp->rate,
+ new_opp->rate);
+ else
+ dev_opp->suspend_opp = new_opp;
+ }
+
+ if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+ dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
+
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(dev_opp, new_opp, false);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
+}
+
+/**
+ * dev_pm_opp_add() - Add an OPP entry from a table definition
+ * @dev: device for which we do this operation
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ * + * Return: + * 0 On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM Memory allocation failure + */ +int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +{ + return _opp_add_dynamic(dev, freq, u_volt, true); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_add); + +/** + * _opp_set_availability() - helper to set the availability of an opp + * @dev: device for which we do this operation + * @freq: OPP frequency to modify availability + * @availability_req: availability status requested for this opp + * + * Set the availability of an OPP with an RCU operation, opp_{enable,disable} + * share a common logic which is isolated here. + * + * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modification was done OR modification was + * successful. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks to + * keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { struct device_opp *dev_opp; @@ -763,7 +1158,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -789,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_disable(struct device *dev, unsigned long freq) @@ -825,28 +1220,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); #ifdef CONFIG_OF /** - * of_init_opp_table() - Initialize opp table from device tree + * of_free_opp_table() - Free OPP table entries created from static DT entries * @dev: device pointer used to lookup device OPPs. * - * Register the initial OPP table with the OPP library for given device. + * Free OPPs created using static entries present in DT. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function indirectly uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -ENODEV when 'operating-points' property is not found or is invalid data - * in device node. 
- * -ENODATA when empty 'operating-points' property is found */ -int of_init_opp_table(struct device *dev) +void of_free_opp_table(struct device *dev) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *opp, *tmp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int error = PTR_ERR(dev_opp); + + if (error != -ENODEV) + WARN(1, "%s: dev_opp: %d\n", + IS_ERR_OR_NULL(dev) ? + "Invalid device" : dev_name(dev), + error); + goto unlock; + } + + /* Find if dev_opp manages a single device */ + if (list_is_singular(&dev_opp->dev_list)) { + /* Free static OPPs */ + list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { + if (!opp->dynamic) + _opp_remove(dev_opp, opp, true); + } + } else { + _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); + } + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(of_free_opp_table); + +void of_cpumask_free_opp_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + of_free_opp_table(cpu_dev); + } +} +EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); + +/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ +static struct device_node * +_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) +{ + struct device_node *opp_np; + + opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); + if (!opp_np) { + dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", + __func__, prop->name); + return ERR_PTR(-EINVAL); + } + + return opp_np; +} + +/* Returns opp descriptor node for a device. Caller must do of_node_put() */ +static struct device_node *_of_get_opp_desc_node(struct device *dev) +{ + const struct property *prop; + + prop = of_find_property(dev->of_node, "operating-points-v2", NULL); + if (!prop) + return ERR_PTR(-ENODEV); + if (!prop->value) + return ERR_PTR(-ENODATA); + + /* + * TODO: Support for multiple OPP tables. + * + * There should be only ONE phandle present in "operating-points-v2" + * property. 
+ */
+ if (prop->length != sizeof(__be32)) {
+ dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+ const struct property *prop)
+{
+ struct device_node *opp_np, *np;
+ struct device_opp *dev_opp;
+ int ret = 0, count = 0;
+
+ if (!prop->value)
+ return -ENODATA;
+
+ /* Get opp node */
+ opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+ if (IS_ERR(opp_np))
+ return PTR_ERR(opp_np);
+
+ dev_opp = _managed_opp(opp_np);
+ if (dev_opp) {
+ /* OPPs are already managed */
+ if (!_add_list_dev(dev, dev_opp))
+ ret = -ENOMEM;
+ goto put_opp_np;
+ }
+
+ /* We have opp-list node now, iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+ /* There should be one or more OPPs defined */
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto put_opp_np;
+ }
+
+ dev_opp = _find_device_opp(dev);
+ if (WARN_ON(IS_ERR(dev_opp))) {
+ ret = PTR_ERR(dev_opp);
+ goto free_table;
+ }
+
+ dev_opp->np = opp_np;
+ dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+ of_node_put(opp_np);
+ return 0;
+
+free_table:
+ of_free_opp_table(dev);
+put_opp_np:
+ of_node_put(opp_np);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on the old, deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
@@ -881,47 +1427,177 @@ int of_init_opp_table(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
*/
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
+{
+ const struct property *prop;
+
+ /*
+ * OPPs have two versions of bindings now. The older one is deprecated,
+ * try for the new binding first.
+ */
+ prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+ if (!prop) {
+ /*
+ * Try the old, deprecated bindings for backward compatibility
+ * with older dtbs.
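For orientation, a typical consumer of this path probes the table once and
then resolves OPPs under RCU. A minimal sketch; the device pointer and the
1 GHz target frequency are purely illustrative:

struct dev_pm_opp *opp;
unsigned long freq, volt;
int ret;

ret = of_init_opp_table(dev); /* tries v2 bindings, falls back to v1 */
if (ret)
        return ret;

rcu_read_lock();
freq = 1000000000;
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp)) {
        rcu_read_unlock();
        return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp); /* valid only under rcu_read_lock() */
rcu_read_unlock();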
+ */ + return _of_init_opp_table_v1(dev); + } + + return _of_init_opp_table_v2(dev, prop); +} +EXPORT_SYMBOL_GPL(of_init_opp_table); + +int of_cpumask_init_opp_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu, ret = 0; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + ret = of_init_opp_table(cpu_dev); + if (ret) { + pr_err("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); + + /* Free all other OPPs */ + of_cpumask_free_opp_table(cpumask); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table); + +/* Required only for V1 bindings, as v2 can manage it from DT itself */ +int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) { + struct device_list_opp *list_dev; struct device_opp *dev_opp; - struct dev_pm_opp *opp, *tmp; + struct device *dev; + int cpu, ret = 0; - /* Check for existing list for 'dev' */ - dev_opp = _find_device_opp(dev); + rcu_read_lock(); + + dev_opp = _find_device_opp(cpu_dev); if (IS_ERR(dev_opp)) { - int error = PTR_ERR(dev_opp); - if (error != -ENODEV) - WARN(1, "%s: dev_opp: %d\n", - IS_ERR_OR_NULL(dev) ? - "Invalid device" : dev_name(dev), - error); - return; + ret = -EINVAL; + goto out_rcu_read_unlock; } - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); + for_each_cpu(cpu, cpumask) { + if (cpu == cpu_dev->id) + continue; - /* Free static OPPs */ - list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { - if (!opp->dynamic) - _opp_remove(dev_opp, opp); + dev = get_cpu_device(cpu); + if (!dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + continue; + } + + list_dev = _add_list_dev(dev, dev_opp); + if (!list_dev) { + dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", + __func__, cpu); + continue; + } } +out_rcu_read_unlock: + rcu_read_unlock(); - mutex_unlock(&dev_opp_list_lock); + return 0; } -EXPORT_SYMBOL_GPL(of_free_opp_table); +EXPORT_SYMBOL_GPL(set_cpus_sharing_opps); + +/* + * Works only for OPP v2 bindings. + * + * cpumask should be already set to mask of cpu_dev->id. + * Returns -ENOENT if operating-points-v2 bindings aren't supported. + */ +int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_node *np, *tmp_np; + struct device *tcpu_dev; + int cpu, ret = 0; + + /* Get OPP descriptor node */ + np = _of_get_opp_desc_node(cpu_dev); + if (IS_ERR(np)) { + dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__, + PTR_ERR(np)); + return -ENOENT; + } + + /* OPPs are shared ? 
*/ + if (!of_property_read_bool(np, "opp-shared")) + goto put_cpu_node; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + ret = -ENODEV; + goto put_cpu_node; + } + + /* Get OPP descriptor node */ + tmp_np = _of_get_opp_desc_node(tcpu_dev); + if (IS_ERR(tmp_np)) { + dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", + __func__, PTR_ERR(tmp_np)); + ret = PTR_ERR(tmp_np); + goto put_cpu_node; + } + + /* CPUs are sharing opp node */ + if (np == tmp_np) + cpumask_set_cpu(cpu, cpumask); + + of_node_put(tmp_np); + } + +put_cpu_node: + of_node_put(np); + return ret; +} +EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps); #endif diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f1a5d95e7..998fa6b23 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev); extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); #else /* CONFIG_PM */ diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index e56d538d0..7f3646e45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) mutex_unlock(&dev_pm_qos_mtx); return ret; } + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ + int ret; + + if (!dev->power.set_latency_tolerance) + return -EINVAL; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + ret = pm_qos_sysfs_add_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + pm_qos_sysfs_remove_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + /* Remove the request from user space now */ + pm_runtime_get_sync(dev); + dev_pm_qos_update_user_latency_tolerance(dev, + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d2be3f9c2..a7b46798c 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} + void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); diff --git a/drivers/base/property.c b/drivers/base/property.c index 37a7bb7b2..2d75366c6 100644 --- a/drivers/base/property.c +++ 
b/drivers/base/property.c
@@ -16,6 +16,8 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
/**
* device_add_property_set - Add a collection of properties to a device object.
@@ -154,6 +156,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval)
@@ -178,6 +181,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u16_array(struct device *dev, const char *propname,
u16 *val, size_t nval)
@@ -202,6 +206,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u32_array(struct device *dev, const char *propname,
u32 *val, size_t nval)
@@ -226,6 +231,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u64_array(struct device *dev, const char *propname,
u64 *val, size_t nval)
@@ -250,6 +256,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string_array(struct device *dev, const char *propname,
const char **val, size_t nval)
@@ -271,6 +278,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property type is not a string.
+ * %-ENXIO if no suitable firmware interface is present.
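From a caller's point of view the new %-ENXIO return value separates "this
device has no firmware interface at all" from "the property is missing". A
hedged sketch; the "max-speed" property name and the fallback value are
invented for illustration:

u32 max_speed;
int err = device_property_read_u32(dev, "max-speed", &max_speed);

if (err == -ENXIO)
        return err; /* no DT node, no ACPI node, no built-in property set */
if (err)
        max_speed = 1000; /* property absent or malformed: use a default */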
*/
int device_property_read_string(struct device *dev, const char *propname,
const char **val)
@@ -292,9 +300,11 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
else if (is_acpi_node(_fwnode_)) \
_ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
- else \
+ else if (is_pset(_fwnode_)) \
_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
+ else \
+ _ret_ = -ENXIO; \
_ret_; \
})
@@ -432,9 +442,10 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
else if (is_acpi_node(fwnode))
return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
DEV_PROP_STRING, val, nval);
-
- return pset_prop_read_array(to_pset(fwnode), propname,
- DEV_PROP_STRING, val, nval);
+ else if (is_pset(fwnode))
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, nval);
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -462,7 +473,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
DEV_PROP_STRING, val, 1);
- return -ENXIO;
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, 1);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
@@ -534,3 +546,81 @@ bool device_dma_is_coherent(struct device *dev)
return coherent;
}
EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or an errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ const char *pm;
+ int err, i;
+
+ err = device_property_read_string(dev, "phy-mode", &pm);
+ if (err < 0)
+ err = device_property_read_string(dev,
+ "phy-connection-type", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+ const char *name, char *addr,
+ int alen)
+{
+ int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+ if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+ return addr;
+ return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
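A sketch of how a network driver might combine the two helpers documented
above; my_probe() and the random-MAC fallback policy are illustrative, not
part of this patch:

static int my_probe(struct platform_device *pdev)
{
        char mac[ETH_ALEN];
        int mode;

        mode = device_get_phy_mode(&pdev->dev); /* PHY_INTERFACE_MODE_* index */
        if (mode < 0)
                return mode;

        if (!device_get_mac_address(&pdev->dev, mac, ETH_ALEN))
                eth_random_addr((u8 *)mac); /* no usable MAC in firmware */

        /* ... register the netdev using 'mac' and 'mode' ... */
        return 0;
}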
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+ char *res;
+
+ res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ if (res)
+ return res;
+
+ res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ if (res)
+ return res;
+
+ return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849fc..cc557886a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,14 +136,20 @@ struct regmap {
/* if set, the HW registers are known to match map->reg_defaults */
bool no_sync_defaults;
- struct reg_default *patch;
+ struct reg_sequence *patch;
int patch_regs;
- /* if set, converts bulk rw to single rw */
- bool use_single_rw;
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+ /* if set, converts bulk write to single write */
+ bool use_single_write;
/* if set, the device supports multi write mode */
bool can_multi_write;
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
};
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b9862d741..6f8a13ec3 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -729,7 +729,7 @@ int regcache_sync_block(struct regmap *map, void *block,
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map) && !map->use_single_rw)
+ if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index 8d304e2a9..c03ebfd4c 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -78,37 +78,24 @@ static const struct regmap_bus ac97_regmap_bus = {
.reg_read = regmap_ac97_reg_read,
};
-/**
- * regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
-/**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index c8941f39c..4c55cfbad 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -468,6 +468,87 @@ static const struct file_operations regmap_access_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_cache_only_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_only);
+ ssize_t result;
+ bool was_enabled, require_sync = false;
+ int err;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_only;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0) {
+ map->unlock(map->lock_arg);
+ return result;
+ }
+
+ if (map->cache_only && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_only && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+ require_sync = true;
+ }
+
+ map->unlock(map->lock_arg);
+
+ if (require_sync) {
+ err = regcache_sync(map);
+ if (err)
+ dev_err(map->dev, "Failed to sync cache %d\n", err);
+ }
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_bypass);
+ ssize_t result;
+ bool was_enabled;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_bypass;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0)
+ goto out;
+
+ if (map->cache_bypass && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_bypass && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+ }
+
+out:
+ map->unlock(map->lock_arg);
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_bypass_write_file,
+};
+
void regmap_debugfs_init(struct regmap *map, const char *name)
{
struct rb_node *next;
@@ -517,10 +598,11 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
if (map->max_register || regmap_readable(map, 0)) {
umode_t registers_mode;
- if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
- registers_mode = 0600;
- else
- registers_mode = 0400;
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+ registers_mode = 0600;
+#else
+ registers_mode = 0400;
+#endif
debugfs_create_file("registers", registers_mode, map->debugfs,
map, &regmap_map_fops);
@@ -529,12 +611,13 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}
if (map->cache_type) {
-
debugfs_create_bool("cache_only", 0400, map->debugfs,
- &map->cache_only);
+ debugfs_create_file("cache_only", 0600, map->debugfs,
+ &map->cache_only, &regmap_cache_only_fops);
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
&map->cache_dirty);
- debugfs_create_bool("cache_bypass", 0400, map->debugfs,
- &map->cache_bypass);
+ debugfs_create_file("cache_bypass", 0600, map->debugfs,
+ &map->cache_bypass,
+ &regmap_cache_bypass_fops);
}
next = rb_first(&map->range_tree);
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 4b76e3311..1a8ec3b2b 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -209,11 +209,60 @@ static struct regmap_bus regmap_i2c = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 1)
+ return -EINVAL;
+ if (count >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ --count;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg_size != 1 || val_size < 1)
+ return -EINVAL;
+ if (val_size >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+ if (ret == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
return &regmap_i2c;
+ else if (config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
@@ -233,47 +282,34 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
return ERR_PTR(-ENOTSUPP);
}
-/**
- * regmap_init_i2c(): Initialise register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
-/**
- * devm_regmap_init_i2c(): Initialise managed register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap.
The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 2597600a5..38d1f72d8 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -209,7 +209,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* Read in the statuses, using a single bulk read if possible
* in order to reduce the I/O overheads.
*/
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
data->irq_reg_stride == 1) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
@@ -398,7 +398,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
d->irq_reg_stride == 1) {
d->status_reg_buf = kmalloc(map->format.val_bytes *
chip->num_regs, GFP_KERNEL);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 04a329a37..426a57e41 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -296,20 +296,11 @@ err_free:
return ERR_PTR(ret);
}
-/**
- * regmap_init_mmio_clk(): Initialise register map with register clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -317,25 +308,17 @@ struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return regmap_init(dev, &regmap_mmio, ctx, config);
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
-
-/**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -343,8 +326,9 @@ struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 53d1148e8..edd9a839d 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,37 +113,24 @@ static struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
-/**
- * regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spi);
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
-/**
- * devm_regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The map will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index d7026dc33..7e58f6560 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -91,36 +91,25 @@ static struct regmap_bus regmap_spmi_base = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
-/**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
static int regmap_spmi_ext_read(void *context,
const void *reg, size_t reg_size,
@@ -222,35 +211,24 @@ static struct regmap_bus regmap_spmi_ext = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
-/**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04f2..afaf56200 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -34,7 +35,7 @@
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
- bool *change);
+ bool *change, bool force_write);
static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val);
@@ -93,6 +94,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
bool regmap_readable(struct regmap *map, unsigned int reg)
{
+ if (!map->reg_read)
+ return false;
+
if (map->max_register && reg > map->max_register)
return false;
@@ -515,22 +519,12 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
-/**
- * regmap_init(): Initialise register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions.
- */ -struct regmap *regmap_init(struct device *dev, - const struct regmap_bus *bus, - void *bus_context, - const struct regmap_config *config) +struct regmap *__regmap_init(struct device *dev, + const struct regmap_bus *bus, + void *bus_context, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) { struct regmap *map; int ret = -EINVAL; @@ -556,10 +550,14 @@ struct regmap *regmap_init(struct device *dev, spin_lock_init(&map->spinlock); map->lock = regmap_lock_spinlock; map->unlock = regmap_unlock_spinlock; + lockdep_set_class_and_name(&map->spinlock, + lock_key, lock_name); } else { mutex_init(&map->mutex); map->lock = regmap_lock_mutex; map->unlock = regmap_unlock_mutex; + lockdep_set_class_and_name(&map->mutex, + lock_key, lock_name); } map->lock_arg = map; } @@ -573,8 +571,13 @@ struct regmap *regmap_init(struct device *dev, map->reg_stride = config->reg_stride; else map->reg_stride = 1; - map->use_single_rw = config->use_single_rw; - map->can_multi_write = config->can_multi_write; + map->use_single_read = config->use_single_rw || !bus || !bus->read; + map->use_single_write = config->use_single_rw || !bus || !bus->write; + map->can_multi_write = config->can_multi_write && bus && bus->write; + if (bus) { + map->max_raw_read = bus->max_raw_read; + map->max_raw_write = bus->max_raw_write; + } map->dev = dev; map->bus = bus; map->bus_context = bus_context; @@ -763,7 +766,7 @@ struct regmap *regmap_init(struct device *dev, if ((reg_endian != REGMAP_ENDIAN_BIG) || (val_endian != REGMAP_ENDIAN_BIG)) goto err_map; - map->use_single_rw = true; + map->use_single_write = true; } if (!map->format.format_write && @@ -899,30 +902,19 @@ err_map: err: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(regmap_init); +EXPORT_SYMBOL_GPL(__regmap_init); static void devm_regmap_release(struct device *dev, void *res) { regmap_exit(*(struct regmap **)res); } -/** - * devm_regmap_init(): Initialise managed register map - * - * @dev: Device that will be interacted with - * @bus: Bus-specific callbacks to use with device - * @bus_context: Data passed to bus-specific callbacks - * @config: Configuration for register map - * - * The return value will be an ERR_PTR() on error or a valid pointer - * to a struct regmap. This function should generally not be called - * directly, it should be called by bus-specific init functions. The - * map will be automatically freed by the device management code. 
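The extra lock_class_key/lock_name arguments exist so that the public
regmap_init_*()/devm_regmap_init_*() entry points can become header macros
that mint one static lockdep key per call site. A sketch of that wrapper
shape; the my_* names are illustrative, and the real macro in
include/linux/regmap.h may differ in detail:

#define my_regmap_lockdep_wrapper(fn, name, ...)                \
(                                                               \
        ({                                                      \
                static struct lock_class_key _key;              \
                fn(__VA_ARGS__, &_key,                          \
                   KBUILD_BASENAME ":"                          \
                   __stringify(__LINE__) ":"                    \
                   "(" name ")->lock");                         \
        })                                                      \
)

#define my_regmap_init_i2c(i2c, config)                         \
        my_regmap_lockdep_wrapper(__regmap_init_i2c, #config, i2c, config)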
- */
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap **ptr, *regmap;
@@ -930,7 +922,8 @@ struct regmap *devm_regmap_init(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- regmap = regmap_init(dev, bus, bus_context, config);
+ regmap = __regmap_init(dev, bus, bus_context, config,
+ lock_key, lock_name);
if (!IS_ERR(regmap)) {
*ptr = regmap;
devres_add(dev, ptr);
@@ -940,7 +933,7 @@ struct regmap *devm_regmap_init(struct device *dev,
return regmap;
}
-EXPORT_SYMBOL_GPL(devm_regmap_init);
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
static void regmap_field_init(struct regmap_field *rm_field,
struct regmap *regmap, struct reg_field reg_field)
@@ -1178,7 +1171,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
ret = _regmap_update_bits(map, range->selector_reg,
range->selector_mask,
win_page << range->selector_shift,
- &page_chg);
+ &page_chg, false);
map->work_buf = orig_work_buf;
@@ -1382,10 +1375,33 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
*/
bool regmap_can_raw_write(struct regmap *map)
{
- return map->bus && map->format.format_val && map->format.format_reg;
+ return map->bus && map->bus->write && map->format.format_val &&
+ map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+ return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+ return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1555,6 +1571,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
+ if (map->max_raw_write && map->max_raw_write < val_len)
+ return -E2BIG;
map->lock(map->lock_arg);
@@ -1624,6 +1642,18 @@ int regmap_fields_write(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_write);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_write_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
/**
* regmap_fields_update_bits(): Perform a read/modify/write cycle
* on the register field
@@ -1669,6 +1699,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
+ size_t total_size = val_bytes * val_count;
if (map->bus && !map->format.parse_inplace)
return -EINVAL;
@@ -1677,9 +1708,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
/*
* Some devices don't support bulk write, for
- * them we have a series of single write operations.
+ * them we have a series of single write operations in the first two if
+ * blocks.
+ *
+ * The first if block is used for memory mapped io.
It does not allow + * val_bytes of 3 for example. + * The second one is used for busses which do not have this limitation + * and can write arbitrary value lengths. */ - if (!map->bus || map->use_single_rw) { + if (!map->bus) { map->lock(map->lock_arg); for (i = 0; i < val_count; i++) { unsigned int ival; @@ -1711,6 +1748,38 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, } out: map->unlock(map->lock_arg); + } else if (map->use_single_write || + (map->max_raw_write && map->max_raw_write < total_size)) { + int chunk_stride = map->reg_stride; + size_t chunk_size = val_bytes; + size_t chunk_count = val_count; + + if (!map->use_single_write) { + chunk_size = map->max_raw_write; + if (chunk_size % val_bytes) + chunk_size -= chunk_size % val_bytes; + chunk_count = total_size / chunk_size; + chunk_stride *= chunk_size / val_bytes; + } + + map->lock(map->lock_arg); + /* Write as many bytes as possible with chunk_size */ + for (i = 0; i < chunk_count; i++) { + ret = _regmap_raw_write(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + chunk_size); + if (ret) + break; + } + + /* Write remaining bytes */ + if (!ret && chunk_size * i < total_size) { + ret = _regmap_raw_write(map, reg + (i * chunk_stride), + val + (i * chunk_size), + total_size - i * chunk_size); + } + map->unlock(map->lock_arg); } else { void *wval; @@ -1740,10 +1809,10 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write); * * the (register,newvalue) pairs in regs have not been formatted, but * they are all in the same page and have been changed to being page - * relative. The page register has been written if that was neccessary. + * relative. The page register has been written if that was necessary. */ static int _regmap_raw_multi_reg_write(struct regmap *map, - const struct reg_default *regs, + const struct reg_sequence *regs, size_t num_regs) { int ret; @@ -1768,8 +1837,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, u8 = buf; for (i = 0; i < num_regs; i++) { - int reg = regs[i].reg; - int val = regs[i].def; + unsigned int reg = regs[i].reg; + unsigned int val = regs[i].def; trace_regmap_hw_write_start(map, reg, 1); map->format.format_reg(u8, reg, map->reg_shift); u8 += reg_bytes + pad_bytes; @@ -1800,17 +1869,19 @@ static unsigned int _regmap_register_page(struct regmap *map, } static int _regmap_range_multi_paged_reg_write(struct regmap *map, - struct reg_default *regs, + struct reg_sequence *regs, size_t num_regs) { int ret; int i, n; - struct reg_default *base; + struct reg_sequence *base; unsigned int this_page = 0; + unsigned int page_change = 0; /* * the set of registers are not neccessarily in order, but * since the order of write must be preserved this algorithm - * chops the set each time the page changes + * chops the set each time the page changes. This also applies + * if there is a delay required at any point in the sequence. */ base = regs; for (i = 0, n = 0; i < num_regs; i++, n++) { @@ -1826,16 +1897,48 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map, this_page = win_page; if (win_page != this_page) { this_page = win_page; + page_change = 1; + } + } + + /* If we have both a page change and a delay make sure to + * write the regs and apply the delay before we change the + * page. 
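A worked example of the chunking arithmetic above, assuming val_bytes = 2,
val_count = 10 (so total_size = 20 bytes) and max_raw_write = 7:

/*
 * chunk_size   = 7 - (7 % 2)        = 6 bytes (three values per raw write)
 * chunk_count  = 20 / 6             = 3 full chunks
 * chunk_stride = reg_stride * (6/2) = 3 registers per chunk
 *
 * Result: three raw writes of 6 bytes each, then one trailing write of
 * 20 - 3 * 6 = 2 bytes for the final value.
 */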
+ */ + + if (page_change || regs[i].delay_us) { + + /* For situations where the first write requires + * a delay we need to make sure we don't call + * raw_multi_reg_write with n=0 + * This can't occur with page breaks as we + * never write on the first iteration + */ + if (regs[i].delay_us && i == 0) + n = 1; + ret = _regmap_raw_multi_reg_write(map, base, n); if (ret != 0) return ret; + + if (regs[i].delay_us) + udelay(regs[i].delay_us); + base += n; n = 0; - } - ret = _regmap_select_page(map, &base[n].reg, range, 1); - if (ret != 0) - return ret; + + if (page_change) { + ret = _regmap_select_page(map, + &base[n].reg, + range, 1); + if (ret != 0) + return ret; + + page_change = 0; + } + } + } if (n > 0) return _regmap_raw_multi_reg_write(map, base, n); @@ -1843,7 +1946,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map, } static int _regmap_multi_reg_write(struct regmap *map, - const struct reg_default *regs, + const struct reg_sequence *regs, size_t num_regs) { int i; @@ -1854,6 +1957,9 @@ static int _regmap_multi_reg_write(struct regmap *map, ret = _regmap_write(map, regs[i].reg, regs[i].def); if (ret != 0) return ret; + + if (regs[i].delay_us) + udelay(regs[i].delay_us); } return 0; } @@ -1893,10 +1999,14 @@ static int _regmap_multi_reg_write(struct regmap *map, for (i = 0; i < num_regs; i++) { unsigned int reg = regs[i].reg; struct regmap_range_node *range; + + /* Coalesce all the writes between a page break or a delay + * in a sequence + */ range = _regmap_range_lookup(map, reg); - if (range) { - size_t len = sizeof(struct reg_default)*num_regs; - struct reg_default *base = kmemdup(regs, len, + if (range || regs[i].delay_us) { + size_t len = sizeof(struct reg_sequence)*num_regs; + struct reg_sequence *base = kmemdup(regs, len, GFP_KERNEL); if (!base) return -ENOMEM; @@ -1929,7 +2039,7 @@ static int _regmap_multi_reg_write(struct regmap *map, * A value of zero will be returned on success, a negative errno will be * returned in error cases. */ -int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, +int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, int num_regs) { int ret; @@ -1962,7 +2072,7 @@ EXPORT_SYMBOL_GPL(regmap_multi_reg_write); * be returned in error cases. */ int regmap_multi_reg_write_bypassed(struct regmap *map, - const struct reg_default *regs, + const struct reg_sequence *regs, int num_regs) { int ret; @@ -2050,7 +2160,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* * Some buses or devices flag reads by setting the high bits in the - * register addresss; since it's always the high bits for all + * register address; since it's always the high bits for all * current formats we can do this here rather than in * formatting. This may break if we get interesting formats. 
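
The high-bit read flagging described above is driven by the flag masks in the map's configuration; a minimal sketch of how a driver would request it, for a hypothetical SPI device where bit 7 of the register address marks a read transaction:

static const struct regmap_config foo_spi_regmap = {
	.reg_bits       = 8,
	.val_bits       = 8,
	/* bit 7 of the register address is set for reads */
	.read_flag_mask = 0x80,
};
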
*/ @@ -2097,8 +2207,6 @@ static int _regmap_read(struct regmap *map, unsigned int reg, int ret; void *context = _regmap_map_get_context(map); - WARN_ON(!map->reg_read); - if (!map->cache_bypass) { ret = regcache_read(map, reg, val); if (ret == 0) @@ -2179,11 +2287,22 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return -EINVAL; if (reg % map->reg_stride) return -EINVAL; + if (val_count == 0) + return -EINVAL; map->lock(map->lock_arg); if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || map->cache_type == REGCACHE_NONE) { + if (!map->bus->read) { + ret = -ENOTSUPP; + goto out; + } + if (map->max_raw_read && map->max_raw_read < val_len) { + ret = -E2BIG; + goto out; + } + /* Physical block read if there's no cache involved */ ret = _regmap_raw_read(map, reg, val, val_len); @@ -2292,20 +2411,51 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, * Some devices do not support bulk read, for * them we have a series of single read operations. */ - if (map->use_single_rw) { - for (i = 0; i < val_count; i++) { - ret = regmap_raw_read(map, - reg + (i * map->reg_stride), - val + (i * val_bytes), - val_bytes); - if (ret != 0) - return ret; - } - } else { + size_t total_size = val_bytes * val_count; + + if (!map->use_single_read && + (!map->max_raw_read || map->max_raw_read > total_size)) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); if (ret != 0) return ret; + } else { + /* + * Some devices do not support bulk read or do not + * support large bulk reads, for them we have a series + * of read operations. + */ + int chunk_stride = map->reg_stride; + size_t chunk_size = val_bytes; + size_t chunk_count = val_count; + + if (!map->use_single_read) { + chunk_size = map->max_raw_read; + if (chunk_size % val_bytes) + chunk_size -= chunk_size % val_bytes; + chunk_count = total_size / chunk_size; + chunk_stride *= chunk_size / val_bytes; + } + + /* Read bytes that fit into a multiple of chunk_size */ + for (i = 0; i < chunk_count; i++) { + ret = regmap_raw_read(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + chunk_size); + if (ret != 0) + return ret; + } + + /* Read remaining bytes */ + if (chunk_size * i < total_size) { + ret = regmap_raw_read(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + total_size - i * chunk_size); + if (ret != 0) + return ret; + } } for (i = 0; i < val_count * val_bytes; i += val_bytes) @@ -2317,7 +2467,34 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, &ival); if (ret != 0) return ret; - map->format.format_val(val + (i * val_bytes), ival, 0); + + if (map->format.format_val) { + map->format.format_val(val + (i * val_bytes), ival, 0); + } else { + /* Devices providing read and write + * operations can use the bulk I/O + * functions if they define a val_bytes; + * we assume that the values are native + * endian.
+ */ + u32 *u32 = val; + u16 *u16 = val; + u8 *u8 = val; + + switch (map->format.val_bytes) { + case 4: + u32[i] = ival; + break; + case 2: + u16[i] = ival; + break; + case 1: + u8[i] = ival; + break; + default: + return -EINVAL; + } + } } } @@ -2327,7 +2504,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read); static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, - bool *change) + bool *change, bool force_write) { int ret; unsigned int tmp, orig; @@ -2339,7 +2516,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, tmp = orig & ~mask; tmp |= val & mask; - if (tmp != orig) { + if (force_write || (tmp != orig)) { ret = _regmap_write(map, reg, tmp); if (change) *change = true; @@ -2367,13 +2544,36 @@ int regmap_update_bits(struct regmap *map, unsigned int reg, int ret; map->lock(map->lock_arg); - ret = _regmap_update_bits(map, reg, mask, val, NULL); + ret = _regmap_update_bits(map, reg, mask, val, NULL, false); map->unlock(map->lock_arg); return ret; } EXPORT_SYMBOL_GPL(regmap_update_bits); +/** + * regmap_write_bits: Perform a read/modify/write cycle on the register map + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * + * Returns zero for success, a negative number on error. + */ +int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + int ret; + + map->lock(map->lock_arg); + ret = _regmap_update_bits(map, reg, mask, val, NULL, true); + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_write_bits); + /** * regmap_update_bits_async: Perform a read/modify/write cycle on the register * map asynchronously @@ -2398,7 +2598,7 @@ int regmap_update_bits_async(struct regmap *map, unsigned int reg, map->async = true; - ret = _regmap_update_bits(map, reg, mask, val, NULL); + ret = _regmap_update_bits(map, reg, mask, val, NULL, false); map->async = false; @@ -2427,7 +2627,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg, int ret; map->lock(map->lock_arg); - ret = _regmap_update_bits(map, reg, mask, val, change); + ret = _regmap_update_bits(map, reg, mask, val, change, false); map->unlock(map->lock_arg); return ret; } @@ -2460,7 +2660,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg, map->async = true; - ret = _regmap_update_bits(map, reg, mask, val, change); + ret = _regmap_update_bits(map, reg, mask, val, change, false); map->async = false; @@ -2552,10 +2752,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete); * The caller must ensure that this function cannot be called * concurrently with either itself or regcache_sync(). 
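
The reg_default to reg_sequence conversion running through this file exists so that write sequences can carry an optional per-write settle time (delay_us). A minimal usage sketch; the device, registers and values are hypothetical:

static const struct reg_sequence foo_init_seq[] = {
	{ .reg = 0x01, .def = 0x10 },
	/* wait 50us after enabling the oscillator before continuing */
	{ .reg = 0x02, .def = 0x01, .delay_us = 50 },
	{ .reg = 0x03, .def = 0xff },
};

err = regmap_multi_reg_write(map, foo_init_seq,
			     ARRAY_SIZE(foo_init_seq));

The same array type can be handed to regmap_register_patch(), whose signature changes just below.
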
*/ -int regmap_register_patch(struct regmap *map, const struct reg_default *regs, +int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, int num_regs) { - struct reg_default *p; + struct reg_sequence *p; int ret; bool bypass; @@ -2564,7 +2764,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs, return 0; p = krealloc(map->patch, - sizeof(struct reg_default) * (map->patch_regs + num_regs), + sizeof(struct reg_sequence) * (map->patch_regs + num_regs), GFP_KERNEL); if (p) { memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index be5fffb6d..023d448ed 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN config BCMA_DRIVER_GPIO bool "BCMA GPIO driver" depends on BCMA && GPIOLIB - select IRQ_DOMAIN if BCMA_HOST_SOC + select GPIOLIB_IRQCHIP if BCMA_HOST_SOC help Driver to provide access to the GPIO pins of the bcma bus. diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 15f2b2e24..38f156745 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus); int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); #endif +struct device *bcma_bus_get_host_dev(struct bcma_bus *bus); /* scan.c */ void bcma_detect_chip(struct bcma_bus *bus); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 5f6018e7c..504899a72 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -8,10 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. */ -#include -#include +#include #include -#include #include #include @@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio) } #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) -static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) -{ - struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); - - if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) - return irq_find_mapping(cc->irq_domain, gpio); - else - return -EINVAL; -} static void bcma_gpio_irq_unmask(struct irq_data *d) { - struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc); int gpio = irqd_to_hwirq(d); u32 val = bcma_chipco_gpio_in(cc, BIT(gpio)); @@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d) static void bcma_gpio_irq_mask(struct irq_data *d) { - struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc); int gpio = irqd_to_hwirq(d); bcma_chipco_gpio_intmask(cc, BIT(gpio), 0); @@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = { static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) { struct bcma_drv_cc *cc = dev_id; + struct gpio_chip *gc = &cc->gpio; u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN); u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ); u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL); @@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) if (!irqs) return IRQ_NONE; - for_each_set_bit(gpio, &irqs, cc->gpio.ngpio) - generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio)); + for_each_set_bit(gpio, &irqs, gc->ngpio) + generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio)); bcma_chipco_gpio_polarity(cc, irqs, val & irqs); 
return IRQ_HANDLED; } -static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc) +static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) { struct gpio_chip *chip = &cc->gpio; - int gpio, hwirq, err; + int hwirq, err; if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) return 0; - cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio, - &irq_domain_simple_ops, cc); - if (!cc->irq_domain) { - err = -ENODEV; - goto err_irq_domain; - } - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = irq_create_mapping(cc->irq_domain, gpio); - - irq_set_chip_data(irq, cc); - irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip, - handle_simple_irq); - } - hwirq = bcma_core_irq(cc->core, 0); err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio", cc); if (err) - goto err_req_irq; + return err; bcma_chipco_gpio_intmask(cc, ~0, 0); bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO); - return 0; - -err_req_irq: - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = irq_find_mapping(cc->irq_domain, gpio); - - irq_dispose_mapping(irq); + err = gpiochip_irqchip_add(chip, + &bcma_gpio_irq_chip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (err) { + free_irq(hwirq, cc); + return err; } - irq_domain_remove(cc->irq_domain); -err_irq_domain: - return err; + + return 0; } -static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc) +static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc) { - struct gpio_chip *chip = &cc->gpio; - int gpio; - if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) return; bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO); free_irq(bcma_core_irq(cc->core, 0), cc); - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = irq_find_mapping(cc->irq_domain, gpio); - - irq_dispose_mapping(irq); - } - irq_domain_remove(cc->irq_domain); } #else -static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc) +static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) { return 0; } -static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc) +static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc) { } #endif @@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->set = bcma_gpio_set_value; chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; -#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) - chip->to_irq = bcma_gpio_to_irq; -#endif + chip->owner = THIS_MODULE; + chip->dev = bcma_bus_get_host_dev(bus); #if IS_BUILTIN(CONFIG_OF) if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) chip->of_node = cc->core->dev.of_node; @@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) else chip->base = -1; - err = bcma_gpio_irq_domain_init(cc); + err = gpiochip_add(chip); if (err) return err; - err = gpiochip_add(chip); + err = bcma_gpio_irq_init(cc); if (err) { - bcma_gpio_irq_domain_exit(cc); + gpiochip_remove(chip); return err; } @@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) int bcma_gpio_unregister(struct bcma_drv_cc *cc) { - bcma_gpio_irq_domain_exit(cc); + bcma_gpio_irq_exit(cc); gpiochip_remove(&cc->gpio); return 0; } diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 9635f1033..24882c18f 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -7,11 +7,14 @@ #include "bcma_private.h" #include +#include #include +#include #include #include #include #include +#include MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); @@ -268,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) } } +struct device 
*bcma_bus_get_host_dev(struct bcma_bus *bus) +{ + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: + if (bus->host_pci) + return &bus->host_pci->dev; + else + return NULL; + case BCMA_HOSTTYPE_SOC: + if (bus->host_pdev) + return &bus->host_pdev->dev; + else + return NULL; + case BCMA_HOSTTYPE_SDIO: + if (bus->host_sdio) + return &bus->host_sdio->dev; + else + return NULL; + } + return NULL; +} + void bcma_init_bus(struct bcma_bus *bus) { mutex_lock(&bcma_buses_mutex); @@ -387,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; + struct device *dev; /* Scan for devices (cores) */ err = bcma_bus_scan(bus); @@ -409,6 +435,16 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_pci_early_init(&bus->drv_pci[0]); } + dev = bcma_bus_get_host_dev(bus); + /* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when + * of_default_bus_match_table is exported or in some other way + * accessible. This is just a temporary workaround. + */ + if (IS_BUILTIN(CONFIG_BCMA) && dev) { + of_platform_populate(dev->of_node, of_default_bus_match_table, + NULL, dev); + } + /* Cores providing flash access go before SPROM init */ list_for_each_entry(core, &bus->cores, list) { if (bcma_is_core_needed_early(core->id.id)) diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 46c282fff..dd73e1ff1 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -395,7 +395,7 @@ aoeblk_gdalloc(void *vp) WARN_ON(d->flags & DEVFL_TKILL); WARN_ON(d->gd); WARN_ON(d->flags & DEVFL_UP); - blk_queue_max_hw_sectors(q, 1024); + blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); q->backing_dev_info.name = "aoe"; q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; d->bufpool = mp; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 422b7d84f..ad80c85e0 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1110,7 +1110,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) d->ip.rq = NULL; do { bio = rq->bio; - bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); + bok = !fastfail && !bio->bi_error; } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); /* cf. 
http://lkml.org/lkml/2006/10/31/28 */ @@ -1172,7 +1172,7 @@ ktiocomplete(struct frame *f) ahout->cmdstat, ahin->cmdstat, d->aoemajor, d->aoeminor); noskb: if (buf) - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; goto out; } @@ -1185,7 +1185,7 @@ noskb: if (buf) "aoe: runt data size in read from", (long) d->aoemajor, d->aoeminor, skb->len, n); - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; break; } if (n > f->iter.bi_size) { @@ -1193,7 +1193,7 @@ noskb: if (buf) "aoe: too-large data size in read from", (long) d->aoemajor, d->aoeminor, n, f->iter.bi_size); - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; break; } bvcpy(skb, f->buf->bio, f->iter, n); @@ -1695,7 +1695,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf) if (buf == NULL) return; buf->iter.bi_size = 0; - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; if (buf->nframesout == 0) aoe_end_buf(d, buf); } diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index e774c50b6..ffd194750 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d) if (rq == NULL) return; while ((bio = d->ip.nxbio)) { - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; d->ip.nxbio = bio->bi_next; n = (unsigned long) rq->special; rq->special = (void *) --n; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 64ab4951e..b9794aeeb 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -331,14 +331,12 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) struct bio_vec bvec; sector_t sector; struct bvec_iter iter; - int err = -EIO; sector = bio->bi_iter.bi_sector; if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) - goto out; + goto io_error; if (unlikely(bio->bi_rw & REQ_DISCARD)) { - err = 0; discard_from_brd(brd, sector, bio->bi_iter.bi_size); goto out; } @@ -349,15 +347,20 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; + int err; + err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, rw, sector); if (err) - break; + goto io_error; sector += len >> SECTOR_SHIFT; } out: - bio_endio(bio, err); + bio_endio(bio); + return; +io_error: + bio_io_error(bio); } static int brd_rw_page(struct block_device *bdev, sector_t sector, @@ -371,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, #ifdef CONFIG_BLK_DEV_RAM_DAX static long brd_direct_access(struct block_device *bdev, sector_t sector, - void **kaddr, unsigned long *pfn, long size) + void __pmem **kaddr, unsigned long *pfn) { struct brd_device *brd = bdev->bd_disk->private_data; struct page *page; @@ -381,13 +384,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector, page = brd_insert_page(brd, sector); if (!page) return -ENOSPC; - *kaddr = page_address(page); + *kaddr = (void __pmem *)page_address(page); *pfn = page_to_pfn(page); - /* - * TODO: If size > PAGE_SIZE, we could look to see if the next page in - * the file happens to be mapped to the next page of physical RAM. 
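
Nearly all of the block-driver churn in this patch follows a single interface change: bio_endio() lost its error argument, and completion status now travels in bio->bi_error. A before/after sketch of the idiom (variable names illustrative):

/* pre-4.3 style */
bio_endio(bio, err);

/* 4.3 style, as used throughout this patch */
bio->bi_error = err;
bio_endio(bio);

/* or, for the common -EIO case */
bio_io_error(bio);

This also removes the old BIO_UPTODATE flag dance, including the "failed but no error returned" workaround several drivers carried.
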
- */ return PAGE_SIZE; } #else @@ -500,7 +499,7 @@ static struct brd_device *brd_alloc(int i) blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE); brd->brd_queue->limits.discard_granularity = PAGE_SIZE; - brd->brd_queue->limits.max_discard_sectors = UINT_MAX; + blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX); brd->brd_queue->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 1318e3217..b3868e7a1 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -175,11 +175,11 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ device->md_io.submit_jif = jiffies; if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) - bio_endio(bio, -EIO); + bio_io_error(bio); else submit_bio(rw, bio); wait_until_done_or_force_detached(device, bdev, &device->md_io.done); - if (bio_flagged(bio, BIO_UPTODATE)) + if (!bio->bi_error) err = device->md_io.error; out: diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 434c77dcc..e5e0f19ce 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -941,36 +941,27 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref) } /* bv_page may be a copy, or may be the original */ -static void drbd_bm_endio(struct bio *bio, int error) +static void drbd_bm_endio(struct bio *bio) { struct drbd_bm_aio_ctx *ctx = bio->bi_private; struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); - int uptodate = bio_flagged(bio, BIO_UPTODATE); - - - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! - * do we want to WARN() on this? */ - if (!error && !uptodate) - error = -EIO; if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && !bm_test_page_unchanged(b->bm_pages[idx])) drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); - if (error) { + if (bio->bi_error) { /* ctx error will hold the completed-last non-zero error code, * in case error codes differ. */ - ctx->error = error; + ctx->error = bio->bi_error; bm_set_page_io_err(b->bm_pages[idx]); /* Not identical to on disk version of it. * Is BM_PAGE_IO_ERROR enough? */ if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", - error, idx); + bio->bi_error, idx); } else { bm_clear_page_io_err(b->bm_pages[idx]); dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); @@ -1031,7 +1022,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho if (drbd_insert_fault(device, (rw & WRITE) ? 
DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio->bi_rw |= rw; - bio_endio(bio, -EIO); + bio_io_error(bio); } else { submit_bio(rw, bio); /* this should not count as user activity and cause the diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index efd19c2da..015c6e91b 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1450,7 +1450,6 @@ extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); extern void drbd_make_request(struct request_queue *q, struct bio *bio); extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); -extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); extern int is_valid_ar_handle(struct drbd_request *, sector_t); @@ -1481,9 +1480,9 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd); /* drbd_worker.c */ /* bi_end_io handlers */ -extern void drbd_md_endio(struct bio *bio, int error); -extern void drbd_peer_request_endio(struct bio *bio, int error); -extern void drbd_request_endio(struct bio *bio, int error); +extern void drbd_md_endio(struct bio *bio); +extern void drbd_peer_request_endio(struct bio *bio); +extern void drbd_request_endio(struct bio *bio); extern int drbd_worker(struct drbd_thread *thi); enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); void drbd_resync_after_changed(struct drbd_device *device); @@ -1604,12 +1603,13 @@ static inline void drbd_generic_make_request(struct drbd_device *device, __release(local); if (!bio->bi_bdev) { drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); - bio_endio(bio, -ENODEV); + bio->bi_error = -ENODEV; + bio_endio(bio); return; } if (drbd_insert_fault(device, fault_type)) - bio_endio(bio, -EIO); + bio_io_error(bio); else generic_make_request(bio); } diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a1518539b..74d97f4ba 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2774,7 +2774,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig This triggers a max_bio_size message upon first attach or connect */ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); - blk_queue_merge_bvec(q, drbd_merge_bvec); q->queue_lock = &resource->req_lock; device->md_io.page = alloc_page(GFP_KERNEL); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 74df8cfad..e80cbefbc 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1156,14 +1156,14 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi /* For now, don't allow more than one activity log extent worth of data * to be discarded in one go. We may need to rework drbd_al_begin_io() * to allow for even larger discard ranges */ - q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS; + blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); /* REALLY? Is stacking secdiscard "legal"? 
*/ if (blk_queue_secdiscard(b)) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); } else { - q->limits.max_discard_sectors = 0; + blk_queue_max_discard_sectors(q, 0); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q); } diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3907202fb..211592682 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -201,7 +201,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - bio_endio(m->bio, m->error); + m->bio->bi_error = m->error; + bio_endio(m->bio); dec_ap_bio(device); } @@ -1153,12 +1154,12 @@ drbd_submit_req_private_bio(struct drbd_request *req) rw == WRITE ? DRBD_FAULT_DT_WR : rw == READ ? DRBD_FAULT_DT_RD : DRBD_FAULT_DT_RA)) - bio_endio(bio, -EIO); + bio_io_error(bio); else generic_make_request(bio); put_ldev(device); } else - bio_endio(bio, -EIO); + bio_io_error(bio); } static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) @@ -1191,7 +1192,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long /* only pass the error to the upper layers. * if user cannot handle io errors, that's not our business. */ drbd_err(device, "could not kmalloc() req\n"); - bio_endio(bio, -ENOMEM); + bio->bi_error = -ENOMEM; + bio_endio(bio); return ERR_PTR(-ENOMEM); } req->start_jif = start_jif; @@ -1497,6 +1499,8 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) struct drbd_device *device = (struct drbd_device *) q->queuedata; unsigned long start_jif; + blk_queue_split(q, &bio, q->bio_split); + start_jif = jiffies; /* @@ -1508,41 +1512,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) __drbd_make_request(device, bio, start_jif); } -/* This is called by bio_add_page(). - * - * q->max_hw_sectors and other global limits are already enforced there. - * - * We need to call down to our lower level device, - * in case it has special restrictions. - * - * We also may need to enforce configured max-bio-bvecs limits. - * - * As long as the BIO is empty we have to allow at least one bvec, - * regardless of size and offset, so no need to ask lower levels. 
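
The drbd_merge_bvec() removal below is the flip side of the blk_queue_split() call added to drbd_make_request() above: instead of asking every layer how large a bio may grow while it is being built, an oversized bio is now split when it arrives. A minimal sketch of the new pattern, with foo_* names invented for illustration:

static void foo_make_request(struct request_queue *q, struct bio *bio)
{
	/* split up front; no per-layer merge_bvec_fn callbacks needed */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... queue or process the (possibly smaller) bio ... */
}
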
- */ -int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) -{ - struct drbd_device *device = (struct drbd_device *) q->queuedata; - unsigned int bio_size = bvm->bi_size; - int limit = DRBD_MAX_BIO_SIZE; - int backing_limit; - - if (bio_size && get_ldev(device)) { - unsigned int max_hw_sectors = queue_max_hw_sectors(q); - struct request_queue * const b = - device->ldev->backing_bdev->bd_disk->queue; - if (b->merge_bvec_fn) { - bvm->bi_bdev = device->ldev->backing_bdev; - backing_limit = b->merge_bvec_fn(b, bvm, bvec); - limit = min(limit, backing_limit); - } - put_ldev(device); - if ((limit >> 9) > max_hw_sectors) - limit = max_hw_sectors << 9; - } - return limit; -} - void request_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d0fae55d8..5578c1477 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -65,12 +65,12 @@ rwlock_t global_state_lock; /* used for synchronous meta data and bitmap IO * submitted by drbd_md_sync_page_io() */ -void drbd_md_endio(struct bio *bio, int error) +void drbd_md_endio(struct bio *bio) { struct drbd_device *device; device = bio->bi_private; - device->md_io.error = error; + device->md_io.error = bio->bi_error; /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able * to timeout on the lower level device, and eventually detach from it. @@ -170,31 +170,20 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver. */ -void drbd_peer_request_endio(struct bio *bio, int error) +void drbd_peer_request_endio(struct bio *bio) { struct drbd_peer_request *peer_req = bio->bi_private; struct drbd_device *device = peer_req->peer_device->device; - int uptodate = bio_flagged(bio, BIO_UPTODATE); int is_write = bio_data_dir(bio) == WRITE; int is_discard = !!(bio->bi_rw & REQ_DISCARD); - if (error && __ratelimit(&drbd_ratelimit_state)) + if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", is_write ? (is_discard ? "discard" : "write") - : "read", error, + : "read", bio->bi_error, (unsigned long long)peer_req->i.sector); - if (!error && !uptodate) { - if (__ratelimit(&drbd_ratelimit_state)) - drbd_warn(device, "%s: setting error to -EIO s=%llus\n", - is_write ? "write" : "read", - (unsigned long long)peer_req->i.sector); - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! */ - error = -EIO; - } - if (error) + if (bio->bi_error) set_bit(__EE_WAS_ERROR, &peer_req->flags); bio_put(bio); /* no need for the bio anymore */ @@ -208,24 +197,13 @@ void drbd_peer_request_endio(struct bio *bio, int error) /* read, readA or write requests on R_PRIMARY coming from drbd_make_request */ -void drbd_request_endio(struct bio *bio, int error) +void drbd_request_endio(struct bio *bio) { unsigned long flags; struct drbd_request *req = bio->bi_private; struct drbd_device *device = req->device; struct bio_and_error m; enum drbd_req_event what; - int uptodate = bio_flagged(bio, BIO_UPTODATE); - - if (!error && !uptodate) { - drbd_warn(device, "p %s: setting error to -EIO\n", - bio_data_dir(bio) == WRITE ? "write" : "read"); - /* strange behavior of some lower level drivers... 
- * fail the request by clearing the uptodate flag, - * but do not return any error?! */ - error = -EIO; - } - /* If this request was aborted locally before, * but now was completed "successfully", @@ -259,14 +237,14 @@ void drbd_request_endio(struct bio *bio, int error) if (__ratelimit(&drbd_ratelimit_state)) drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); - if (!error) + if (!bio->bi_error) panic("possible random memory corruption caused by delayed completion of aborted local request\n"); } /* to avoid recursion in __req_mod */ - if (unlikely(error)) { + if (unlikely(bio->bi_error)) { if (bio->bi_rw & REQ_DISCARD) - what = (error == -EOPNOTSUPP) + what = (bio->bi_error == -EOPNOTSUPP) ? DISCARD_COMPLETED_NOTSUPP : DISCARD_COMPLETED_WITH_ERROR; else @@ -279,7 +257,7 @@ void drbd_request_endio(struct bio *bio, int error) what = COMPLETED_OK; bio_put(req->private_bio); - req->private_bio = ERR_PTR(error); + req->private_bio = ERR_PTR(bio->bi_error); /* not req_mod(), we need irqsave here! */ spin_lock_irqsave(&device->resource->req_lock, flags); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index a08cda955..331363e7d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3771,13 +3771,14 @@ struct rb0_cbdata { struct completion complete; }; -static void floppy_rb0_cb(struct bio *bio, int err) +static void floppy_rb0_cb(struct bio *bio) { struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; int drive = cbdata->drive; - if (err) { - pr_info("floppy: error %d while reading block 0\n", err); + if (bio->bi_error) { + pr_info("floppy: error %d while reading block 0\n", + bio->bi_error); set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); } complete(&cbdata->complete); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 23103ad14..291ec9e39 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -693,7 +693,7 @@ static void loop_config_discard(struct loop_device *lo) lo->lo_encrypt_key_size) { q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; - q->limits.max_discard_sectors = 0; + blk_queue_max_discard_sectors(q, 0); q->limits.discard_zeroes_data = 0; queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); return; @@ -701,7 +701,7 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; - q->limits.max_discard_sectors = UINT_MAX >> 9; + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); q->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); } @@ -1504,17 +1504,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? 
-EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0e385d8e9..1b8762338 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -40,8 +41,7 @@ #include struct nbd_device { - int flags; - int harderror; /* Code of hard error */ + u32 flags; struct socket * sock; /* If == NULL, device is not ready, yet */ int magic; @@ -56,11 +56,25 @@ struct nbd_device { struct gendisk *disk; int blksize; loff_t bytesize; - pid_t pid; /* pid of nbd-client, if attached */ int xmit_timeout; - int disconnect; /* a disconnect has been requested by user */ + bool disconnect; /* a disconnect has been requested by user */ + + struct timer_list timeout_timer; + spinlock_t tasks_lock; + struct task_struct *task_recv; + struct task_struct *task_send; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbg_dir; +#endif }; +#if IS_ENABLED(CONFIG_DEBUG_FS) +static struct dentry *nbd_dbg_dir; +#endif + +#define nbd_name(nbd) ((nbd)->disk->disk_name) + #define NBD_MAGIC 0x68797548 static unsigned int nbds_max = 16; @@ -113,26 +127,38 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req) /* * Forcibly shutdown the socket causing all listeners to error */ -static void sock_shutdown(struct nbd_device *nbd, int lock) +static void sock_shutdown(struct nbd_device *nbd) { - if (lock) - mutex_lock(&nbd->tx_lock); - if (nbd->sock) { - dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); - kernel_sock_shutdown(nbd->sock, SHUT_RDWR); - nbd->sock = NULL; - } - if (lock) - mutex_unlock(&nbd->tx_lock); + if (!nbd->sock) + return; + + dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); + kernel_sock_shutdown(nbd->sock, SHUT_RDWR); + nbd->sock = NULL; + del_timer_sync(&nbd->timeout_timer); } static void nbd_xmit_timeout(unsigned long arg) { - struct task_struct *task = (struct task_struct *)arg; + struct nbd_device *nbd = (struct nbd_device *)arg; + unsigned long flags; + + if (list_empty(&nbd->queue_head)) + return; + + nbd->disconnect = true; + + spin_lock_irqsave(&nbd->tasks_lock, flags); - printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n", - task->comm, task->pid); - force_sig(SIGKILL, task); + if (nbd->task_recv) + force_sig(SIGKILL, nbd->task_recv); + + if (nbd->task_send) + force_sig(SIGKILL, nbd->task_send); + + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + + dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); } /* @@ -171,33 +197,12 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; - if (send) { - struct timer_list ti; - - if (nbd->xmit_timeout) { - init_timer(&ti); - ti.function = nbd_xmit_timeout; - ti.data = (unsigned long)current; - ti.expires = jiffies + nbd->xmit_timeout; - add_timer(&ti); - } + if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); - if (nbd->xmit_timeout) - del_timer_sync(&ti); - } else + else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); - if (signal_pending(current)) { - siginfo_t info; - printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", - task_pid_nr(current), current->comm, - dequeue_signal_lock(current, ¤t->blocked, &info)); - result = -EINTR; - sock_shutdown(nbd, !send); - break; - } - if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ @@ -210,6 +215,9 @@ static int sock_xmit(struct 
nbd_device *nbd, int send, void *buf, int size, sigprocmask(SIG_SETMASK, &oldset, NULL); tsk_restore_flags(current, pflags, PF_MEMALLOC); + if (!send && nbd->xmit_timeout) + mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); + return result; } @@ -333,26 +341,24 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive control failed (result %d)\n", result); - goto harderror; + return ERR_PTR(result); } if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", (unsigned long)ntohl(reply.magic)); - result = -EPROTO; - goto harderror; + return ERR_PTR(-EPROTO); } req = nbd_find_request(nbd, *(struct request **)reply.handle); if (IS_ERR(req)) { result = PTR_ERR(req); if (result != -ENOENT) - goto harderror; + return ERR_PTR(result); dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", reply.handle); - result = -EBADR; - goto harderror; + return ERR_PTR(-EBADR); } if (ntohl(reply.error)) { @@ -380,18 +386,15 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) } } return req; -harderror: - nbd->harderror = result; - return NULL; } static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); + struct nbd_device *nbd = (struct nbd_device *)disk->private_data; - return sprintf(buf, "%ld\n", - (long) ((struct nbd_device *)disk->private_data)->pid); + return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); } static struct device_attribute pid_attr = { @@ -399,28 +402,60 @@ static struct device_attribute pid_attr = { .show = pid_show, }; -static int nbd_do_it(struct nbd_device *nbd) +static int nbd_thread_recv(struct nbd_device *nbd) { struct request *req; int ret; + unsigned long flags; BUG_ON(nbd->magic != NBD_MAGIC); sk_set_memalloc(nbd->sock->sk); - nbd->pid = task_pid_nr(current); + + spin_lock_irqsave(&nbd->tasks_lock, flags); + nbd->task_recv = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); - nbd->pid = 0; + + spin_lock_irqsave(&nbd->tasks_lock, flags); + nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + return ret; } - while ((req = nbd_read_stat(nbd)) != NULL) + while (1) { + req = nbd_read_stat(nbd); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + break; + } + nbd_end_request(nbd, req); + } device_remove_file(disk_to_dev(nbd->disk), &pid_attr); - nbd->pid = 0; - return 0; + + spin_lock_irqsave(&nbd->tasks_lock, flags); + nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + + if (signal_pending(current)) { + siginfo_t info; + + ret = dequeue_signal_lock(current, ¤t->blocked, &info); + dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n", + task_pid_nr(current), current->comm, ret); + mutex_lock(&nbd->tx_lock); + sock_shutdown(nbd); + mutex_unlock(&nbd->tx_lock); + ret = -ETIMEDOUT; + } + + return ret; } static void nbd_clear_que(struct nbd_device *nbd) @@ -455,6 +490,7 @@ static void nbd_clear_que(struct nbd_device *nbd) req->errors++; nbd_end_request(nbd, req); } + dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); } @@ -482,6 +518,9 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd->active_req = req; + if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head)) + mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); + if (nbd_send_req(nbd, req) != 
0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; @@ -503,10 +542,15 @@ error_out: nbd_end_request(nbd, req); } -static int nbd_thread(void *data) +static int nbd_thread_send(void *data) { struct nbd_device *nbd = data; struct request *req; + unsigned long flags; + + spin_lock_irqsave(&nbd->tasks_lock, flags); + nbd->task_send = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); set_user_nice(current, MIN_NICE); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { @@ -515,6 +559,20 @@ static int nbd_thread(void *data) kthread_should_stop() || !list_empty(&nbd->waiting_queue)); + if (signal_pending(current)) { + siginfo_t info; + int ret; + + ret = dequeue_signal_lock(current, ¤t->blocked, + &info); + dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n", + task_pid_nr(current), current->comm, ret); + mutex_lock(&nbd->tx_lock); + sock_shutdown(nbd); + mutex_unlock(&nbd->tx_lock); + break; + } + /* extract request */ if (list_empty(&nbd->waiting_queue)) continue; @@ -528,6 +586,17 @@ static int nbd_thread(void *data) /* handle request */ nbd_handle_req(nbd, req); } + + spin_lock_irqsave(&nbd->tasks_lock, flags); + nbd->task_send = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + + /* Clear maybe pending signals */ + if (signal_pending(current)) { + siginfo_t info; + dequeue_signal_lock(current, ¤t->blocked, &info); + } + return 0; } @@ -538,7 +607,7 @@ static int nbd_thread(void *data) * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } */ -static void do_nbd_request(struct request_queue *q) +static void nbd_request_handler(struct request_queue *q) __releases(q->queue_lock) __acquires(q->queue_lock) { struct request *req; @@ -574,6 +643,9 @@ static void do_nbd_request(struct request_queue *q) } } +static int nbd_dev_dbg_init(struct nbd_device *nbd); +static void nbd_dev_dbg_close(struct nbd_device *nbd); + /* Must be called with tx_lock held */ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, @@ -597,7 +669,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, if (!nbd->sock) return -EINVAL; - nbd->disconnect = 1; + nbd->disconnect = true; nbd_send_req(nbd, &sreq); return 0; @@ -625,7 +697,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->sock = sock; if (max_part > 0) bdev->bd_invalidated = 1; - nbd->disconnect = 0; /* we're connected now */ + nbd->disconnect = false; /* we're connected now */ return 0; } return -EINVAL; @@ -648,6 +720,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, case NBD_SET_TIMEOUT: nbd->xmit_timeout = arg * HZ; + if (arg) + mod_timer(&nbd->timeout_timer, + jiffies + nbd->xmit_timeout); + else + del_timer_sync(&nbd->timeout_timer); + return 0; case NBD_SET_FLAGS: @@ -666,7 +744,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, struct socket *sock; int error; - if (nbd->pid) + if (nbd->task_recv) return -EBUSY; if (!nbd->sock) return -EINVAL; @@ -683,24 +761,24 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, else blk_queue_flush(nbd->disk->queue, 0); - thread = kthread_run(nbd_thread, nbd, "%s", - nbd->disk->disk_name); + thread = kthread_run(nbd_thread_send, nbd, "%s", + nbd_name(nbd)); if (IS_ERR(thread)) { mutex_lock(&nbd->tx_lock); return PTR_ERR(thread); } - error = nbd_do_it(nbd); + nbd_dev_dbg_init(nbd); + error = nbd_thread_recv(nbd); + nbd_dev_dbg_close(nbd); kthread_stop(thread); mutex_lock(&nbd->tx_lock); - 
if (error) - return error; - sock_shutdown(nbd, 0); + + sock_shutdown(nbd); sock = nbd->sock; nbd->sock = NULL; nbd_clear_que(nbd); - dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); kill_bdev(bdev); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); set_device_ro(bdev, false); @@ -714,7 +792,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, blkdev_reread_part(bdev); if (nbd->disconnect) /* user requested, ignore socket errors */ return 0; - return nbd->harderror; + return error; } case NBD_CLEAR_QUE: @@ -758,6 +836,161 @@ static const struct block_device_operations nbd_fops = .ioctl = nbd_ioctl, }; +#if IS_ENABLED(CONFIG_DEBUG_FS) + +static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) +{ + struct nbd_device *nbd = s->private; + + if (nbd->task_recv) + seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); + if (nbd->task_send) + seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send)); + + return 0; +} + +static int nbd_dbg_tasks_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbd_dbg_tasks_show, inode->i_private); +} + +static const struct file_operations nbd_dbg_tasks_ops = { + .open = nbd_dbg_tasks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int nbd_dbg_flags_show(struct seq_file *s, void *unused) +{ + struct nbd_device *nbd = s->private; + u32 flags = nbd->flags; + + seq_printf(s, "Hex: 0x%08x\n\n", flags); + + seq_puts(s, "Known flags:\n"); + + if (flags & NBD_FLAG_HAS_FLAGS) + seq_puts(s, "NBD_FLAG_HAS_FLAGS\n"); + if (flags & NBD_FLAG_READ_ONLY) + seq_puts(s, "NBD_FLAG_READ_ONLY\n"); + if (flags & NBD_FLAG_SEND_FLUSH) + seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); + if (flags & NBD_FLAG_SEND_TRIM) + seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); + + return 0; +} + +static int nbd_dbg_flags_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbd_dbg_flags_show, inode->i_private); +} + +static const struct file_operations nbd_dbg_flags_ops = { + .open = nbd_dbg_flags_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int nbd_dev_dbg_init(struct nbd_device *nbd) +{ + struct dentry *dir; + struct dentry *f; + + dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); + if (IS_ERR_OR_NULL(dir)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n", + nbd_name(nbd), PTR_ERR(dir)); + return PTR_ERR(dir); + } + nbd->dbg_dir = dir; + + f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); + if (IS_ERR_OR_NULL(f)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n", + PTR_ERR(f)); + return PTR_ERR(f); + } + + f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); + if (IS_ERR_OR_NULL(f)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n", + PTR_ERR(f)); + return PTR_ERR(f); + } + + f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); + if (IS_ERR_OR_NULL(f)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n", + PTR_ERR(f)); + return PTR_ERR(f); + } + + f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); + if (IS_ERR_OR_NULL(f)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n", + PTR_ERR(f)); + return PTR_ERR(f); + } + + f = debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); + if (IS_ERR_OR_NULL(f)) { + dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n", + PTR_ERR(f)); + return PTR_ERR(f); + } + +
return 0; +} + +static void nbd_dev_dbg_close(struct nbd_device *nbd) +{ + debugfs_remove_recursive(nbd->dbg_dir); +} + +static int nbd_dbg_init(void) +{ + struct dentry *dbg_dir; + + dbg_dir = debugfs_create_dir("nbd", NULL); + if (IS_ERR(dbg_dir)) + return PTR_ERR(dbg_dir); + + nbd_dbg_dir = dbg_dir; + + return 0; +} + +static void nbd_dbg_close(void) +{ + debugfs_remove_recursive(nbd_dbg_dir); +} + +#else /* IS_ENABLED(CONFIG_DEBUG_FS) */ + +static int nbd_dev_dbg_init(struct nbd_device *nbd) +{ + return 0; +} + +static void nbd_dev_dbg_close(struct nbd_device *nbd) +{ +} + +static int nbd_dbg_init(void) +{ + return 0; +} + +static void nbd_dbg_close(void) +{ +} + +#endif + /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) @@ -811,7 +1044,7 @@ static int __init nbd_init(void) * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. */ - disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); + disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock); if (!disk->queue) { put_disk(disk); goto out; @@ -822,7 +1055,7 @@ static int __init nbd_init(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); disk->queue->limits.discard_granularity = 512; - disk->queue->limits.max_discard_sectors = UINT_MAX; + blk_queue_max_discard_sectors(disk->queue, UINT_MAX); disk->queue->limits.discard_zeroes_data = 0; blk_queue_max_hw_sectors(disk->queue, 65536); disk->queue->limits.max_sectors = 256; @@ -835,13 +1068,19 @@ static int __init nbd_init(void) printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); + nbd_dbg_init(); + for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = NBD_MAGIC; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); + spin_lock_init(&nbd_dev[i].tasks_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); + init_timer(&nbd_dev[i].timeout_timer); + nbd_dev[i].timeout_timer.function = nbd_xmit_timeout; + nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i]; init_waitqueue_head(&nbd_dev[i].active_wq); init_waitqueue_head(&nbd_dev[i].waiting_wq); nbd_dev[i].blksize = 1024; @@ -868,6 +1107,9 @@ out: static void __exit nbd_cleanup(void) { int i; + + nbd_dbg_close(); + for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = 0; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 3177b245d..1c9e4fe5a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -222,7 +222,7 @@ static void end_cmd(struct nullb_cmd *cmd) blk_end_request_all(cmd->rq, 0); break; case NULL_Q_BIO: - bio_endio(cmd->bio, 0); + bio_endio(cmd->bio); break; } @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: blk_complete_request(cmd->rq); @@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = { .complete = null_softirq_done_fn, }; +static void cleanup_queue(struct nullb_queue *nq) +{ + kfree(nq->tag_map); + kfree(nq->cmds); +} + +static void cleanup_queues(struct nullb *nullb) +{ + int i; + + for (i = 0; i < nullb->nr_queues; i++) + cleanup_queue(&nullb->queues[i]); + + kfree(nullb->queues); +} + static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); 
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb) if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); + cleanup_queues(nullb); kfree(nullb); } @@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq) return 0; } -static void cleanup_queue(struct nullb_queue *nq) -{ - kfree(nq->tag_map); - kfree(nq->cmds); -} - -static void cleanup_queues(struct nullb *nullb) -{ - int i; - - for (i = 0; i < nullb->nr_queues; i++) - cleanup_queue(&nullb->queues[i]); - - kfree(nullb->queues); -} - static int setup_queues(struct nullb *nullb) { nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), @@ -588,8 +589,7 @@ static int null_add_dev(void) blk_queue_physical_block_size(nullb->q, bs); size = gb * 1024 * 1024 * 1024ULL; - sector_div(size, bs); - set_capacity(disk, size); + set_capacity(disk, size >> 9); disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 7920c2741..ccc0c1f93 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -72,6 +72,10 @@ module_param(nvme_char_major, int, 0); static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0); +static bool use_cmb_sqes = true; +module_param(use_cmb_sqes, bool, 0644); +MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); + static DEFINE_SPINLOCK(dev_list_lock); static LIST_HEAD(dev_list); static struct task_struct *nvme_thread; @@ -103,6 +107,7 @@ struct nvme_queue { char irqname[24]; /* nvme4294967295-65535\0 */ spinlock_t q_lock; struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; volatile struct nvme_completion *cqes; struct blk_mq_tags **tags; dma_addr_t sq_dma_addr; @@ -379,27 +384,28 @@ static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag, * * Safe to use from interrupt context */ -static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) +static void __nvme_submit_cmd(struct nvme_queue *nvmeq, + struct nvme_command *cmd) { u16 tail = nvmeq->sq_tail; - memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); + if (nvmeq->sq_cmds_io) + memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd)); + else + memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); + if (++tail == nvmeq->q_depth) tail = 0; writel(tail, nvmeq->q_db); nvmeq->sq_tail = tail; - - return 0; } -static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) +static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) { unsigned long flags; - int ret; spin_lock_irqsave(&nvmeq->q_lock, flags); - ret = __nvme_submit_cmd(nvmeq, cmd); + __nvme_submit_cmd(nvmeq, cmd); spin_unlock_irqrestore(&nvmeq->q_lock, flags); - return ret; } static __le64 **iod_list(struct nvme_iod *iod) @@ -597,31 +603,34 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_iod *iod = ctx; struct request *req = iod_get_private(iod); struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); - u16 status = le16_to_cpup(&cqe->status) >> 1; + bool requeue = false; + int error = 0; if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { unsigned long flags; + requeue = true; blk_mq_requeue_request(req); spin_lock_irqsave(req->q->queue_lock, flags); if (!blk_queue_stopped(req->q)) blk_mq_kick_requeue_list(req->q); spin_unlock_irqrestore(req->q->queue_lock, flags); - return; + goto release_iod; } 
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; + error = -EINTR; else - req->errors = status; + error = status; } else { - req->errors = nvme_error_status(status); + error = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -630,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (cmd_rq->aborted) dev_warn(nvmeq->dev->dev, "completing aborted command with status:%04x\n", - status); + error); +release_iod: if (iod->nents) { dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -644,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + if (likely(!requeue)) + blk_mq_complete_request(req, error); } /* length is in bytes. gfp flags indicates whether we may sleep. */ @@ -730,18 +741,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req, struct nvme_iod *iod) { - struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + struct nvme_command cmnd; - memcpy(cmnd, req->cmd, sizeof(struct nvme_command)); - cmnd->rw.command_id = req->tag; + memcpy(&cmnd, req->cmd, sizeof(cmnd)); + cmnd.rw.command_id = req->tag; if (req->nr_phys_segments) { - cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); - cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); + cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmnd.rw.prp2 = cpu_to_le64(iod->first_dma); } - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); + __nvme_submit_cmd(nvmeq, &cmnd); } /* @@ -754,45 +763,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, { struct nvme_dsm_range *range = (struct nvme_dsm_range *)iod_list(iod)[0]; - struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + struct nvme_command cmnd; range->cattr = cpu_to_le32(0); range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift); range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); - memset(cmnd, 0, sizeof(*cmnd)); - cmnd->dsm.opcode = nvme_cmd_dsm; - cmnd->dsm.command_id = req->tag; - cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); - cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); - cmnd->dsm.nr = 0; - cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + memset(&cmnd, 0, sizeof(cmnd)); + cmnd.dsm.opcode = nvme_cmd_dsm; + cmnd.dsm.command_id = req->tag; + cmnd.dsm.nsid = cpu_to_le32(ns->ns_id); + cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma); + cmnd.dsm.nr = 0; + cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); + __nvme_submit_cmd(nvmeq, &cmnd); } static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, int cmdid) { - struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + struct nvme_command cmnd; - memset(cmnd, 0, sizeof(*cmnd)); - cmnd->common.opcode = nvme_cmd_flush; - cmnd->common.command_id = cmdid; - cmnd->common.nsid = cpu_to_le32(ns->ns_id); + memset(&cmnd, 0, sizeof(cmnd)); + cmnd.common.opcode = nvme_cmd_flush; + cmnd.common.command_id = cmdid; + cmnd.common.nsid = cpu_to_le32(ns->ns_id); - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); + 
__nvme_submit_cmd(nvmeq, &cmnd); } static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, struct nvme_ns *ns) { struct request *req = iod_get_private(iod); - struct nvme_command *cmnd; + struct nvme_command cmnd; u16 control = 0; u32 dsmgmt = 0; @@ -804,19 +809,16 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; - cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; - memset(cmnd, 0, sizeof(*cmnd)); - - cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); - cmnd->rw.command_id = req->tag; - cmnd->rw.nsid = cpu_to_le32(ns->ns_id); - cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); - cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); - cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); - cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + memset(&cmnd, 0, sizeof(cmnd)); + cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); + cmnd.rw.command_id = req->tag; + cmnd.rw.nsid = cpu_to_le32(ns->ns_id); + cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmnd.rw.prp2 = cpu_to_le64(iod->first_dma); + cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); + cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); - if (blk_integrity_rq(req)) { - cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg)); + if (ns->ms) { switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE3: control |= NVME_RW_PRINFO_PRCHK_GUARD; @@ -825,19 +827,21 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, case NVME_NS_DPS_PI_TYPE2: control |= NVME_RW_PRINFO_PRCHK_GUARD | NVME_RW_PRINFO_PRCHK_REF; - cmnd->rw.reftag = cpu_to_le32( + cmnd.rw.reftag = cpu_to_le32( nvme_block_nr(ns, blk_rq_pos(req))); break; } - } else if (ns->ms) - control |= NVME_RW_PRINFO_PRACT; + if (blk_integrity_rq(req)) + cmnd.rw.metadata = + cpu_to_le64(sg_dma_address(iod->meta_sg)); + else + control |= NVME_RW_PRINFO_PRACT; + } - cmnd->rw.control = cpu_to_le16(control); - cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); + cmnd.rw.control = cpu_to_le16(control); + cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt); - if (++nvmeq->sq_tail == nvmeq->q_depth) - nvmeq->sq_tail = 0; - writel(nvmeq->sq_tail, nvmeq->q_db); + __nvme_submit_cmd(nvmeq, &cmnd); return 0; } @@ -864,8 +868,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -1080,7 +1083,8 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) c.common.command_id = req->tag; blk_mq_free_request(req); - return __nvme_submit_cmd(nvmeq, &c); + __nvme_submit_cmd(nvmeq, &c); + return 0; } static int nvme_submit_admin_async_cmd(struct nvme_dev *dev, @@ -1103,7 +1107,8 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev, cmd->common.command_id = req->tag; - return nvme_submit_cmd(nvmeq, cmd); + nvme_submit_cmd(nvmeq, cmd); + return 0; } static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) @@ -1315,12 +1320,7 @@ static void nvme_abort_req(struct request *req) dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, nvmeq->qid); - if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) { - dev_warn(nvmeq->q_dmadev, - "Could not abort I/O %d QID %d", - req->tag, nvmeq->qid); - blk_mq_free_request(abort_req); - } + 
nvme_submit_cmd(dev->queues[0], &cmd); } static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved) @@ -1374,7 +1374,8 @@ static void nvme_free_queue(struct nvme_queue *nvmeq) { dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + if (nvmeq->sq_cmds) + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), nvmeq->sq_cmds, nvmeq->sq_dma_addr); kfree(nvmeq); } @@ -1447,6 +1448,47 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) spin_unlock_irq(&nvmeq->q_lock); } +static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, + int entry_size) +{ + int q_depth = dev->q_depth; + unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size); + + if (q_size_aligned * nr_io_queues > dev->cmb_size) { + u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); + mem_per_q = round_down(mem_per_q, dev->page_size); + q_depth = div_u64(mem_per_q, entry_size); + + /* + * Ensure the reduced q_depth is above some threshold where it + * would be better to map queues in system memory with the + * original depth + */ + if (q_depth < 64) + return -ENOMEM; + } + + return q_depth; +} + +static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, + int qid, int depth) +{ + if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { + unsigned offset = (qid - 1) * + roundup(SQ_SIZE(depth), dev->page_size); + nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; + nvmeq->sq_cmds_io = dev->cmb + offset; + } else { + nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + return -ENOMEM; + } + + return 0; +} + static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) { @@ -1459,9 +1501,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, if (!nvmeq->cqes) goto free_nvmeq; - nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), - &nvmeq->sq_dma_addr, GFP_KERNEL); - if (!nvmeq->sq_cmds) + if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) goto free_cqdma; nvmeq->q_dmadev = dev->dev; @@ -1696,6 +1736,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) page_shift = dev_page_max; } + dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? + NVME_CAP_NSSRC(cap) : 0; + + if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO)) + writel(NVME_CSTS_NSSRO, &dev->bar->csts); + result = nvme_disable_ctrl(dev, cap); if (result < 0) return result; @@ -1764,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->lba_shift; meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(unsigned long)io.metadata; + metadata = (void __user *)(uintptr_t)io.metadata; write = io.opcode & 1; if (ns->ext) { @@ -1804,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.metadata = cpu_to_le64(meta_dma); status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, - (void __user *)io.addr, length, NULL, 0); + (void __user *)(uintptr_t)io.addr, length, NULL, 0); unmap: if (meta) { if (status == NVME_SC_SUCCESS && !write) { @@ -1846,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, timeout = msecs_to_jiffies(cmd.timeout_ms); status = __nvme_submit_sync_cmd(ns ? 
ns->queue : dev->admin_q, &c, - NULL, (void __user *)cmd.addr, cmd.data_len, + NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, &cmd.result, timeout); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -1856,6 +1902,15 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, return status; } +static int nvme_subsys_reset(struct nvme_dev *dev) +{ + if (!dev->subsystem) + return -ENOTTY; + + writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */ + return 0; +} + static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { @@ -1935,7 +1990,7 @@ static void nvme_config_discard(struct nvme_ns *ns) ns->queue->limits.discard_zeroes_data = 0; ns->queue->limits.discard_alignment = logical_block_size; ns->queue->limits.discard_granularity = logical_block_size; - ns->queue->limits.max_discard_sectors = 0xffffffff; + blk_queue_max_discard_sectors(ns->queue, 0xffffffff); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); } @@ -1989,7 +2044,7 @@ static int nvme_revalidate_disk(struct gendisk *disk) !ns->ext) nvme_init_integrity(ns); - if (ns->ms && !blk_get_integrity(disk)) + if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) set_capacity(disk, 0); else set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); @@ -2020,7 +2075,10 @@ static int nvme_kthread(void *data) spin_lock(&dev_list_lock); list_for_each_entry_safe(dev, next, &dev_list, node) { int i; - if (readl(&dev->bar->csts) & NVME_CSTS_CFS) { + u32 csts = readl(&dev->bar->csts); + + if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) || + csts & NVME_CSTS_CFS) { if (work_busy(&dev->reset_work)) continue; list_del_init(&dev->node); @@ -2067,7 +2125,6 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) goto out_free_ns; queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); - queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue); ns->dev = dev; ns->queue->queuedata = ns; @@ -2081,12 +2138,16 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) list_add_tail(&ns->list, &dev->namespaces); blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); - if (dev->max_hw_sectors) + if (dev->max_hw_sectors) { blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); + blk_queue_max_segments(ns->queue, + ((dev->max_hw_sectors << 9) / dev->page_size) + 1); + } if (dev->stripe_size) blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); if (dev->vwc & NVME_CTRL_VWC_PRESENT) blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); + blk_queue_virt_boundary(ns->queue, dev->page_size - 1); disk->major = nvme_major; disk->first_minor = 0; @@ -2159,6 +2220,58 @@ static int set_queue_count(struct nvme_dev *dev, int count) return min(result & 0xffff, result >> 16) + 1; } +static void __iomem *nvme_map_cmb(struct nvme_dev *dev) +{ + u64 szu, size, offset; + u32 cmbloc; + resource_size_t bar_size; + struct pci_dev *pdev = to_pci_dev(dev->dev); + void __iomem *cmb; + dma_addr_t dma_addr; + + if (!use_cmb_sqes) + return NULL; + + dev->cmbsz = readl(&dev->bar->cmbsz); + if (!(NVME_CMB_SZ(dev->cmbsz))) + return NULL; + + cmbloc = readl(&dev->bar->cmbloc); + + szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); + size = szu * NVME_CMB_SZ(dev->cmbsz); + offset = szu * NVME_CMB_OFST(cmbloc); + bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc)); + + if (offset > bar_size) + return NULL; + + /* + * Controllers may support a CMB size larger than their BAR, + * for example, due to being 
behind a bridge. Reduce the CMB to + * the reported size of the BAR + */ + if (size > bar_size - offset) + size = bar_size - offset; + + dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset; + cmb = ioremap_wc(dma_addr, size); + if (!cmb) + return NULL; + + dev->cmb_dma_addr = dma_addr; + dev->cmb_size = size; + return cmb; +} + +static inline void nvme_release_cmb(struct nvme_dev *dev) +{ + if (dev->cmb) { + iounmap(dev->cmb); + dev->cmb = NULL; + } +} + static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) { return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); @@ -2177,6 +2290,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (result < nr_io_queues) nr_io_queues = result; + if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { + result = nvme_cmb_qdepth(dev, nr_io_queues, + sizeof(struct nvme_command)); + if (result > 0) + dev->q_depth = result; + else + nvme_release_cmb(dev); + } + size = db_bar_size(dev, nr_io_queues); if (size > 8192) { iounmap(dev->bar); @@ -2321,6 +2443,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2332,6 +2470,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2344,7 +2483,6 @@ static int nvme_dev_add(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); int res; - unsigned nn; struct nvme_id_ctrl *ctrl; int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; @@ -2354,7 +2492,6 @@ static int nvme_dev_add(struct nvme_dev *dev) return -EIO; } - nn = le32_to_cpup(&ctrl->nn); dev->oncs = le16_to_cpup(&ctrl->oncs); dev->abort_limit = ctrl->acl + 1; dev->vwc = ctrl->vwc; @@ -2440,6 +2577,8 @@ static int nvme_dev_map(struct nvme_dev *dev) dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); dev->db_stride = 1 << NVME_CAP_STRIDE(cap); dev->dbs = ((void __iomem *)dev->bar) + 4096; + if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) + dev->cmb = nvme_map_cmb(dev); return 0; @@ -2820,6 +2959,8 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) case NVME_IOCTL_RESET: dev_warn(dev->dev, "resetting controller\n"); return nvme_reset(dev); + case NVME_IOCTL_SUBSYS_RESET: + return nvme_subsys_reset(dev); default: return -ENOTTY; } @@ -2833,22 +2974,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -2890,8 +3015,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -2942,7 +3065,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } 
else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } @@ -3145,6 +3267,7 @@ static void nvme_remove(struct pci_dev *pdev) nvme_dev_remove_admin(dev); device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); nvme_free_queues(dev, 0); + nvme_release_cmb(dev); nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4c20c2281..7be2375db 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -977,7 +977,7 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) } } -static void pkt_end_io_read(struct bio *bio, int err) +static void pkt_end_io_read(struct bio *bio) { struct packet_data *pkt = bio->bi_private; struct pktcdvd_device *pd = pkt->pd; @@ -985,9 +985,9 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", bio, (unsigned long long)pkt->sector, - (unsigned long long)bio->bi_iter.bi_sector, err); + (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); - if (err) + if (bio->bi_error) atomic_inc(&pkt->io_errors); if (atomic_dec_and_test(&pkt->io_wait)) { atomic_inc(&pkt->run_sm); @@ -996,13 +996,13 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_bio_finished(pd); } -static void pkt_end_io_packet_write(struct bio *bio, int err) +static void pkt_end_io_packet_write(struct bio *bio) { struct packet_data *pkt = bio->bi_private; struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); + pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error); pd->stats.pkt_ended++; @@ -1340,22 +1340,22 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_queue_bio(pd, pkt->w_bio); } -static void pkt_finish_packet(struct packet_data *pkt, int uptodate) +static void pkt_finish_packet(struct packet_data *pkt, int error) { struct bio *bio; - if (!uptodate) + if (error) pkt->cache_valid = 0; /* Finish all bios corresponding to this packet */ - while ((bio = bio_list_pop(&pkt->orig_bios))) - bio_endio(bio, uptodate ? 
0 : -EIO); + while ((bio = bio_list_pop(&pkt->orig_bios))) { + bio->bi_error = error; + bio_endio(bio); + } } static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) { - int uptodate; - pkt_dbg(2, pd, "pkt %d\n", pkt->id); for (;;) { @@ -1384,7 +1384,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data if (atomic_read(&pkt->io_wait) > 0) return; - if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) { + if (!pkt->w_bio->bi_error) { pkt_set_state(pkt, PACKET_FINISHED_STATE); } else { pkt_set_state(pkt, PACKET_RECOVERY_STATE); @@ -1401,8 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data break; case PACKET_FINISHED_STATE: - uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags); - pkt_finish_packet(pkt, uptodate); + pkt_finish_packet(pkt, pkt->w_bio->bi_error); return; default: @@ -2332,13 +2331,14 @@ static void pkt_close(struct gendisk *disk, fmode_t mode) } -static void pkt_end_io_read_cloned(struct bio *bio, int err) +static void pkt_end_io_read_cloned(struct bio *bio) { struct packet_stacked_data *psd = bio->bi_private; struct pktcdvd_device *pd = psd->pd; + psd->bio->bi_error = bio->bi_error; bio_put(bio); - bio_endio(psd->bio, err); + bio_endio(psd->bio); mempool_free(psd, psd_pool); pkt_bio_finished(pd); } @@ -2447,6 +2447,10 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) char b[BDEVNAME_SIZE]; struct bio *split; + blk_queue_bounce(q, &bio); + + blk_queue_split(q, &bio, q->bio_split); + pd = q->queuedata; if (!pd) { pr_err("%s incorrect request queue\n", @@ -2477,8 +2481,6 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) goto end_io; } - blk_queue_bounce(q, &bio); - do { sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); @@ -2504,26 +2506,6 @@ end_io: -static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, - struct bio_vec *bvec) -{ - struct pktcdvd_device *pd = q->queuedata; - sector_t zone = get_zone(bmd->bi_sector, pd); - int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size; - int remaining = (pd->settings.size << 9) - used; - int remaining2; - - /* - * A bio <= PAGE_SIZE must be allowed. If it crosses a packet - * boundary, pkt_make_request() will split the bio. 
- */ - remaining2 = PAGE_SIZE - bmd->bi_size; - remaining = max(remaining, remaining2); - - BUG_ON(remaining < 0); - return remaining; -} - static void pkt_init_queue(struct pktcdvd_device *pd) { struct request_queue *q = pd->disk->queue; @@ -2531,7 +2513,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd) blk_queue_make_request(q, pkt_make_request); blk_queue_logical_block_size(q, CD_FRAMESIZE); blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); - blk_queue_merge_bvec(q, pkt_merge_bvec); q->queuedata = pd; } diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index b1612eb16..d89fcac59 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -593,7 +593,8 @@ out: next = bio_list_peek(&priv->list); spin_unlock_irq(&priv->lock); - bio_endio(bio, error); + bio->bi_error = error; + bio_endio(bio); return next; } @@ -605,6 +606,8 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio) dev_dbg(&dev->core, "%s\n", __func__); + blk_queue_split(q, &bio, q->bio_split); + spin_lock_irq(&priv->lock); busy = !bio_list_empty(&priv->list); bio_list_add(&priv->list, bio); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 324bf35ec..128e7df5b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_MINORS_PER_MAJOR 256 #define RBD_SINGLE_MAJOR_PART_SHIFT 4 +#define RBD_MAX_PARENT_CHAIN_LEN 16 + #define RBD_SNAP_DEV_NAME_PREFIX "snap_" #define RBD_MAX_SNAP_NAME_LEN \ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) @@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); static void rbd_spec_put(struct rbd_spec *spec); static int rbd_dev_id_to_minor(int dev_id) @@ -1863,9 +1865,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_read_callback(obj_request); break; case CEPH_OSD_OP_SETALLOCHINT: - rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE); + rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || + osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); /* fall through */ case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: rbd_osd_write_callback(obj_request); break; case CEPH_OSD_OP_STAT: @@ -2401,7 +2405,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, opcode = CEPH_OSD_OP_ZERO; } } else if (op_type == OBJ_OP_WRITE) { - opcode = CEPH_OSD_OP_WRITE; + if (!offset && length == object_size) + opcode = CEPH_OSD_OP_WRITEFULL; + else + opcode = CEPH_OSD_OP_WRITE; osd_req_op_alloc_hint_init(osd_request, num_ops, object_size, object_size); num_ops++; @@ -3474,52 +3481,6 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_MQ_RQ_QUEUE_OK; } -/* - * a queue callback. Makes sure that we don't create a bio that spans across - * multiple osd objects. 
One exception would be with a single page bios, - * which we handle later at bio_chain_clone_range() - */ -static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, - struct bio_vec *bvec) -{ - struct rbd_device *rbd_dev = q->queuedata; - sector_t sector_offset; - sector_t sectors_per_obj; - sector_t obj_sector_offset; - int ret; - - /* - * Find how far into its rbd object the partition-relative - * bio start sector is to offset relative to the enclosing - * device. - */ - sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector; - sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT); - obj_sector_offset = sector_offset & (sectors_per_obj - 1); - - /* - * Compute the number of bytes from that offset to the end - * of the object. Account for what's already used by the bio. - */ - ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT; - if (ret > bmd->bi_size) - ret -= bmd->bi_size; - else - ret = 0; - - /* - * Don't send back more than was asked for. And if the bio - * was empty, let the whole thing through because: "Note - * that a block device *must* allow a single page to be - * added to an empty bio." - */ - rbd_assert(bvec->bv_len <= PAGE_SIZE); - if (ret > (int) bvec->bv_len || !bmd->bi_size) - ret = (int) bvec->bv_len; - - return ret; -} - static void rbd_free_disk(struct rbd_device *rbd_dev) { struct gendisk *disk = rbd_dev->disk; @@ -3806,6 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); @@ -3815,10 +3777,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); q->limits.discard_granularity = segment_size; q->limits.discard_alignment = segment_size; - q->limits.max_discard_sectors = segment_size / SECTOR_SIZE; + blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); q->limits.discard_zeroes_data = 1; - blk_queue_merge_bvec(q, rbd_merge_bvec); + if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) + q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; + disk->queue = q; q->queuedata = rbd_dev; @@ -4720,7 +4684,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) } ret = rbd_dev_v2_snap_context(rbd_dev); - dout("rbd_dev_v2_snap_context returned %d\n", ret); + if (ret && first_time) { + kfree(rbd_dev->header.object_prefix); + rbd_dev->header.object_prefix = NULL; + } return ret; } @@ -5169,44 +5136,51 @@ out_err: return ret; } -static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) +/* + * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> + * rbd_dev_image_probe() recursion depth, which means it's also the + * length of the already discovered part of the parent chain. + */ +static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) { struct rbd_device *parent = NULL; - struct rbd_spec *parent_spec; - struct rbd_client *rbdc; int ret; if (!rbd_dev->parent_spec) return 0; - /* - * We need to pass a reference to the client and the parent - * spec when creating the parent rbd_dev. Images related by - * parent/child relationships always share both. 
- */ - parent_spec = rbd_spec_get(rbd_dev->parent_spec); - rbdc = __rbd_get_client(rbd_dev->rbd_client); - ret = -ENOMEM; - parent = rbd_dev_create(rbdc, parent_spec, NULL); - if (!parent) + if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { + pr_info("parent chain is too long (%d)\n", depth); + ret = -EINVAL; goto out_err; + } - ret = rbd_dev_image_probe(parent, false); + parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec, + NULL); + if (!parent) { + ret = -ENOMEM; + goto out_err; + } + + /* + * Images related by parent/child relationships always share + * rbd_client and spec/parent_spec, so bump their refcounts. + */ + __rbd_get_client(rbd_dev->rbd_client); + rbd_spec_get(rbd_dev->parent_spec); + + ret = rbd_dev_image_probe(parent, depth); if (ret < 0) goto out_err; + rbd_dev->parent = parent; atomic_set(&rbd_dev->parent_ref, 1); - return 0; + out_err: - if (parent) { - rbd_dev_unparent(rbd_dev); + rbd_dev_unparent(rbd_dev); + if (parent) rbd_dev_destroy(parent); - } else { - rbd_put_client(rbdc); - rbd_spec_put(parent_spec); - } - return ret; } @@ -5324,7 +5298,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) * parent), initiate a watch on its header object before using that * object to get detailed information about the rbd image. */ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) { int ret; @@ -5342,7 +5316,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) if (ret) goto err_out_format; - if (mapping) { + if (!depth) { ret = rbd_dev_header_watch_sync(rbd_dev); if (ret) { if (ret == -ENOENT) @@ -5363,7 +5337,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Otherwise this is a parent image, identified by pool, image * and snap ids - need to fill in names for those ids. */ - if (mapping) + if (!depth) ret = rbd_spec_fill_snap_id(rbd_dev); else ret = rbd_spec_fill_names(rbd_dev); @@ -5385,12 +5359,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Need to warn users if this image is the one being * mapped and has a parent. */ - if (mapping && rbd_dev->parent_spec) + if (!depth && rbd_dev->parent_spec) rbd_warn(rbd_dev, "WARNING: kernel layering is EXPERIMENTAL!"); } - ret = rbd_dev_probe_parent(rbd_dev); + ret = rbd_dev_probe_parent(rbd_dev, depth); if (ret) goto err_out_probe; @@ -5401,7 +5375,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - if (mapping) + if (!depth) rbd_dev_header_unwatch_sync(rbd_dev); out_header_name: kfree(rbd_dev->header_name); @@ -5464,7 +5438,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, spec = NULL; /* rbd_dev now owns this */ rbd_opts = NULL; /* rbd_dev now owns this */ - rc = rbd_dev_image_probe(rbd_dev, true); + rc = rbd_dev_image_probe(rbd_dev, 0); if (rc < 0) goto err_out_rbd_dev; diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index ac8c62cb4..3163e4cdc 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -137,7 +137,10 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, if (!card->eeh_state && card->gendisk) disk_stats_complete(card, meta->bio, meta->start_time); - bio_endio(meta->bio, atomic_read(&meta->error) ? 
-EIO : 0); + if (atomic_read(&meta->error)) + bio_io_error(meta->bio); + else + bio_endio(meta->bio); kmem_cache_free(bio_meta_pool, meta); } } @@ -148,6 +151,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) struct rsxx_bio_meta *bio_meta; int st = -EINVAL; + blk_queue_split(q, &bio, q->bio_split); + might_sleep(); if (!card) @@ -199,7 +204,9 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) queue_err: kmem_cache_free(bio_meta_pool, bio_meta); req_err: - bio_endio(bio, st); + if (st) + bio->bi_error = st; + bio_endio(bio); } /*----------------- Device Setup -------------------*/ diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 1e46eb230..586f9168f 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -4422,7 +4422,7 @@ static int skd_cons_disk(struct skd_device *skdev) /* DISCARD Flag initialization. */ q->limits.discard_granularity = 8192; q->limits.discard_alignment = 0; - q->limits.max_discard_sectors = UINT_MAX >> 9; + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); q->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4cf81b5bf..04d65790a 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -456,7 +456,7 @@ static void process_page(unsigned long data) PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (control & DMASCR_HARD_ERROR) { /* error */ - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; dev_printk(KERN_WARNING, &card->dev->dev, "I/O error on sector %d/%d\n", le32_to_cpu(desc->local_addr)>>9, @@ -505,7 +505,7 @@ static void process_page(unsigned long data) return_bio = bio->bi_next; bio->bi_next = NULL; - bio_endio(bio, 0); + bio_endio(bio); } } @@ -531,6 +531,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio) (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size); + blk_queue_split(q, &bio, q->bio_split); + spin_lock_irq(&card->lock); *card->biotail = bio; bio->bi_next = NULL; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index d4d05f064..6ca35495a 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) @@ -478,8 +478,7 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev) struct virtio_blk_config, wce, &writeback); if (err) - writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) || - virtio_has_feature(vdev, VIRTIO_F_VERSION_1); + writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); return writeback; } @@ -657,6 +656,7 @@ static int virtblk_probe(struct virtio_device *vdev) vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; vblk->disk->driverfs_dev = &vdev->dev; + vblk->disk->flags |= GENHD_FL_EXT_DEVT; vblk->index = index; /* configure queue flush support */ @@ -840,7 +840,7 @@ static unsigned int features_legacy[] = { static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, - VIRTIO_BLK_F_TOPOLOGY, + VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, VIRTIO_BLK_F_MQ, }; diff --git 
a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 954c0029f..6a685aec6 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1078,9 +1078,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) /* * bio callback. */ -static void end_block_io_op(struct bio *bio, int error) +static void end_block_io_op(struct bio *bio) { - __end_block_io_op(bio->bi_private, error); + __end_block_io_op(bio->bi_private, bio->bi_error); bio_put(bio); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 7a8a73f1f..a69c02dad 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -82,7 +83,6 @@ struct blk_shadow { struct split_bio { struct bio *bio; atomic_t pending; - int err; }; static DEFINE_MUTEX(blkfront_mutex); @@ -148,6 +148,7 @@ struct blkfront_info unsigned int feature_persistent:1; unsigned int max_indirect_segments; int is_ready; + struct blk_mq_tag_set tag_set; }; static unsigned int nr_minors; @@ -248,7 +249,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, struct blkfront_info *info) { struct grant *gnt_list_entry; - unsigned long buffer_mfn; + unsigned long buffer_gfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, @@ -267,10 +268,10 @@ static struct grant *get_grant(grant_ref_t *gref_head, BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); + buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, - buffer_mfn, 0); + buffer_gfn, 0); return gnt_list_entry; } @@ -617,54 +618,41 @@ static inline bool blkif_request_flush_invalid(struct request *req, !(info->feature_flush & REQ_FUA))); } -/* - * do_blkif_request - * read a block; request is in a request queue - */ -static void do_blkif_request(struct request_queue *rq) +static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) { - struct blkfront_info *info = NULL; - struct request *req; - int queued; - - pr_debug("Entered do_blkif_request\n"); + struct blkfront_info *info = qd->rq->rq_disk->private_data; - queued = 0; - - while ((req = blk_peek_request(rq)) != NULL) { - info = req->rq_disk->private_data; - - if (RING_FULL(&info->ring)) - goto wait; + blk_mq_start_request(qd->rq); + spin_lock_irq(&info->io_lock); + if (RING_FULL(&info->ring)) + goto out_busy; - blk_start_request(req); + if (blkif_request_flush_invalid(qd->rq, info)) + goto out_err; - if (blkif_request_flush_invalid(req, info)) { - __blk_end_request_all(req, -EOPNOTSUPP); - continue; - } + if (blkif_queue_request(qd->rq)) + goto out_busy; - pr_debug("do_blk_req %p: cmd %p, sec %lx, " - "(%u/%u) [%s]\n", - req, req->cmd, (unsigned long)blk_rq_pos(req), - blk_rq_cur_sectors(req), blk_rq_sectors(req), - rq_data_dir(req) ? "write" : "read"); - - if (blkif_queue_request(req)) { - blk_requeue_request(rq, req); -wait: - /* Avoid pointless unplugs. 
*/ - blk_stop_queue(rq); - break; - } + flush_requests(info); + spin_unlock_irq(&info->io_lock); + return BLK_MQ_RQ_QUEUE_OK; - queued++; - } +out_err: + spin_unlock_irq(&info->io_lock); + return BLK_MQ_RQ_QUEUE_ERROR; - if (queued != 0) - flush_requests(info); +out_busy: + spin_unlock_irq(&info->io_lock); + blk_mq_stop_hw_queue(hctx); + return BLK_MQ_RQ_QUEUE_BUSY; } +static struct blk_mq_ops blkfront_mq_ops = { + .queue_rq = blkif_queue_rq, + .map_queue = blk_mq_map_queue, +}; + static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, unsigned int physical_sector_size, unsigned int segments) @@ -672,9 +660,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, struct request_queue *rq; struct blkfront_info *info = gd->private_data; - rq = blk_init_queue(do_blkif_request, &info->io_lock); - if (rq == NULL) + memset(&info->tag_set, 0, sizeof(info->tag_set)); + info->tag_set.ops = &blkfront_mq_ops; + info->tag_set.nr_hw_queues = 1; + info->tag_set.queue_depth = BLK_RING_SIZE(info); + info->tag_set.numa_node = NUMA_NO_NODE; + info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + info->tag_set.cmd_size = 0; + info->tag_set.driver_data = info; + + if (blk_mq_alloc_tag_set(&info->tag_set)) return -1; + rq = blk_mq_init_queue(&info->tag_set); + if (IS_ERR(rq)) { + blk_mq_free_tag_set(&info->tag_set); + return -1; + } queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); @@ -902,19 +903,15 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, static void xlvbd_release_gendisk(struct blkfront_info *info) { unsigned int minor, nr_minors; - unsigned long flags; if (info->rq == NULL) return; - spin_lock_irqsave(&info->io_lock, flags); - /* No more blkif_request(). */ - blk_stop_queue(info->rq); + blk_mq_stop_hw_queues(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); - spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); @@ -926,20 +923,18 @@ static void xlvbd_release_gendisk(struct blkfront_info *info) xlbd_release_minors(minor, nr_minors); blk_cleanup_queue(info->rq); + blk_mq_free_tag_set(&info->tag_set); info->rq = NULL; put_disk(info->gd); info->gd = NULL; } +/* Must be called with io_lock held */ static void kick_pending_request_queues(struct blkfront_info *info) { - if (!RING_FULL(&info->ring)) { - /* Re-enable calldowns. */ - blk_start_queue(info->rq); - /* Kick things off immediately. */ - do_blkif_request(info->rq); - } + if (!RING_FULL(&info->ring)) + blk_mq_start_stopped_hw_queues(info->rq, true); } static void blkif_restart_queue(struct work_struct *work) @@ -964,7 +959,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). 
*/ if (info->rq) - blk_stop_queue(info->rq); + blk_mq_stop_hw_queues(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { @@ -1201,7 +1196,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - __blk_end_request_all(req, error); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: @@ -1229,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - __blk_end_request_all(req, error); + blk_mq_complete_request(req, error); break; default: BUG(); @@ -1481,16 +1476,14 @@ static int blkfront_probe(struct xenbus_device *dev, return 0; } -static void split_bio_end(struct bio *bio, int error) +static void split_bio_end(struct bio *bio) { struct split_bio *split_bio = bio->bi_private; - if (error) - split_bio->err = error; - if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; - bio_endio(split_bio->bio, split_bio->err); + split_bio->bio->bi_error = bio->bi_error; + bio_endio(split_bio->bio); kfree(split_bio); } bio_put(bio); @@ -1558,28 +1551,6 @@ static int blkif_recover(struct blkfront_info *info) kfree(copy); - /* - * Empty the queue, this is important because we might have - * requests in the queue with more segments than what we - * can handle now. - */ - spin_lock_irq(&info->io_lock); - while ((req = blk_fetch_request(info->rq)) != NULL) { - if (req->cmd_flags & - (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { - list_add(&req->queuelist, &requests); - continue; - } - merge_bio.head = req->bio; - merge_bio.tail = req->biotail; - bio_list_merge(&bio_list, &merge_bio); - req->bio = NULL; - if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) - pr_alert("diskcache flush request found!\n"); - __blk_end_request_all(req, 0); - } - spin_unlock_irq(&info->io_lock); - xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); @@ -1594,9 +1565,10 @@ static int blkif_recover(struct blkfront_info *info) /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); - blk_requeue_request(info->rq, req); + blk_mq_requeue_request(req); } spin_unlock_irq(&info->io_lock); + blk_mq_kick_requeue_list(info->rq); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ @@ -1984,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev, break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: - blkfront_closing(info); + if (info) + blkfront_closing(info); break; } } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 763301c78..9fa15bb9d 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -388,7 +388,6 @@ static ssize_t comp_algorithm_store(struct device *dev, static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - unsigned long nr_migrated; struct zram *zram = dev_to_zram(dev); struct zram_meta *meta; @@ -399,8 +398,7 @@ static ssize_t compact_store(struct device *dev, } meta = zram->meta; - nr_migrated = zs_compact(meta->mem_pool); - atomic64_add(nr_migrated, &zram->stats.num_migrated); + zs_compact(meta->mem_pool); up_read(&zram->init_lock); return len; @@ -428,26 +426,31 @@ static ssize_t mm_stat_show(struct device 
*dev, struct device_attribute *attr, char *buf) { struct zram *zram = dev_to_zram(dev); + struct zs_pool_stats pool_stats; u64 orig_size, mem_used = 0; long max_used; ssize_t ret; + memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); + down_read(&zram->init_lock); - if (init_done(zram)) + if (init_done(zram)) { mem_used = zs_get_total_pages(zram->meta->mem_pool); + zs_pool_stats(zram->meta->mem_pool, &pool_stats); + } orig_size = atomic64_read(&zram->stats.pages_stored); max_used = atomic_long_read(&zram->stats.max_used_pages); ret = scnprintf(buf, PAGE_SIZE, - "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", + "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n", orig_size << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.compr_data_size), mem_used << PAGE_SHIFT, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.zero_pages), - (u64)atomic64_read(&zram->stats.num_migrated)); + pool_stats.pages_compacted); up_read(&zram->init_lock); return ret; @@ -619,7 +622,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, uncmem = user_mem; if (!uncmem) { - pr_info("Unable to allocate temp memory\n"); + pr_err("Unable to allocate temp memory\n"); ret = -ENOMEM; goto out_cleanup; } @@ -716,7 +719,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, handle = zs_malloc(meta->mem_pool, clen); if (!handle) { - pr_info("Error allocating memory for compressed page: %u, size=%zu\n", + pr_err("Error allocating memory for compressed page: %u, size=%zu\n", index, clen); ret = -ENOMEM; goto out; @@ -848,7 +851,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) if (unlikely(bio->bi_rw & REQ_DISCARD)) { zram_bio_discard(zram, index, offset, bio); - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -881,8 +884,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) update_position(&index, &offset, &bvec); } - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio_endio(bio, 0); + bio_endio(bio); return; out: @@ -899,6 +901,8 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio) if (unlikely(!zram_meta_get(zram))) goto error; + blk_queue_split(queue, &bio, queue->bio_split); + if (!valid_io_request(zram, bio->bi_iter.bi_sector, bio->bi_iter.bi_size)) { atomic64_inc(&zram->stats.invalid_io); @@ -1035,7 +1039,7 @@ static ssize_t disksize_store(struct device *dev, comp = zcomp_create(zram->compressor, zram->max_comp_streams); if (IS_ERR(comp)) { - pr_info("Cannot initialise %s compressing backend\n", + pr_err("Cannot initialise %s compressing backend\n", zram->compressor); err = PTR_ERR(comp); goto out_free_meta; @@ -1213,7 +1217,7 @@ static int zram_add(void) /* gendisk structure */ zram->disk = alloc_disk(1); if (!zram->disk) { - pr_warn("Error allocating disk structure for device %d\n", + pr_err("Error allocating disk structure for device %d\n", device_id); ret = -ENOMEM; goto out_free_queue; @@ -1242,7 +1246,7 @@ static int zram_add(void) blk_queue_io_min(zram->disk->queue, PAGE_SIZE); blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); zram->disk->queue->limits.discard_granularity = PAGE_SIZE; - zram->disk->queue->limits.max_discard_sectors = UINT_MAX; + blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); /* * zram_bio_discard() will clear all logical blocks if logical block * size is identical with physical block size(PAGE_SIZE). 
But if it is @@ -1262,7 +1266,8 @@ static int zram_add(void) ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, &zram_disk_attr_group); if (ret < 0) { - pr_warn("Error creating sysfs group"); + pr_err("Error creating sysfs group for device %d\n", + device_id); goto out_free_disk; } strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); @@ -1402,13 +1407,13 @@ static int __init zram_init(void) ret = class_register(&zram_control_class); if (ret) { - pr_warn("Unable to register zram-control class\n"); + pr_err("Unable to register zram-control class\n"); return ret; } zram_major = register_blkdev(0, "zram"); if (zram_major <= 0) { - pr_warn("Unable to get major number\n"); + pr_err("Unable to get major number\n"); class_unregister(&zram_control_class); return -EBUSY; } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 6dbe2df50..8e9233968 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -78,7 +78,6 @@ struct zram_stats { atomic64_t compr_data_size; /* compressed size of pages stored */ atomic64_t num_reads; /* failed + successful */ atomic64_t num_writes; /* --do-- */ - atomic64_t num_migrated; /* no. of migrated object */ atomic64_t failed_reads; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2e777071e..0bd88c942 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -13,6 +13,10 @@ config BT_RTL tristate select FW_LOADER +config BT_QCA + tristate + select FW_LOADER + config BT_HCIBTUSB tristate "HCI USB driver" depends on USB @@ -132,6 +136,7 @@ config BT_HCIUART_3WIRE config BT_HCIUART_INTEL bool "Intel protocol support" depends on BT_HCIUART + select BT_HCIUART_H4 select BT_INTEL help The Intel protocol support enables Bluetooth HCI over serial @@ -150,6 +155,19 @@ config BT_HCIUART_BCM Say Y here to compile support for Broadcom protocol. +config BT_HCIUART_QCA + bool "Qualcomm Atheros protocol support" + depends on BT_HCIUART + select BT_HCIUART_H4 + select BT_QCA + help + The Qualcomm Atheros protocol supports HCI In-Band Sleep feature + over serial port interface(H4) between controller and host. + This protocol is required for UART clock control for QCA Bluetooth + devices. + + Say Y here to compile support for QCA protocol. 
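The two Kconfig entries above plug a new protocol into the hci_uart line discipline: BT_HCIUART_QCA compiles hci_qca.o into the composite hci_uart module (see the Makefile hunk below), and the BT_QCA it selects builds the btqca firmware-download helpers. As a condensed sketch of the convention these UART drivers follow (field subset only; see hci_qca.c in this patch for the full table), a protocol registers a struct hci_uart_proto with hci_ldisc:

	static const struct hci_uart_proto qca_proto = {
		.id      = HCI_UART_QCA,  /* selected via the HCIUARTSETPROTO ioctl */
		.name    = "QCA",
		.open    = qca_open,      /* allocate per-port driver state */
		.close   = qca_close,
		.recv    = qca_recv,      /* reassemble H4 frames from tty bytes */
		.enqueue = qca_enqueue,   /* queue an skb for transmission */
		.dequeue = qca_dequeue,
		.setup   = qca_setup,     /* btqca-driven firmware download */
	};

	/* from the hci_uart init path: */
	hci_uart_register_proto(&qca_proto);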
+ config BT_HCIBCM203X tristate "HCI BCM203x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index f40e194e7..07c9cf381 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o obj-$(CONFIG_BT_BCM) += btbcm.o obj-$(CONFIG_BT_RTL) += btrtl.o +obj-$(CONFIG_BT_QCA) += btqca.o btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o @@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o +hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o hci_uart-objs := $(hci_uart-y) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 06dbb4d97..cda867ada 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, @@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0CF3, 0x311F) }, { USB_DEVICE(0x0cf3, 0x3121) }, { USB_DEVICE(0x0CF3, 0x817a) }, + { USB_DEVICE(0x0CF3, 0x817b) }, { USB_DEVICE(0x0cf3, 0xe003) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, @@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, @@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index 73b214c39..b45c22fe1 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; - }; + } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index dbc3767b0..1549f861b 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; - }; + } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 40cb8fadf..2fc8a6ed7 100644 --- 
a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -34,6 +34,7 @@ #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) +#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) int btbcm_check_bdaddr(struct hci_dev *hdev) { @@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) * * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller * with waiting for configuration state. + * + * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller + * with waiting for configuration state. */ if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) || - !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) { + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) { BT_INFO("%s: BCM: Using default device address (%pMR)", hdev->name, &bda->bdaddr); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); @@ -241,6 +246,7 @@ static const struct { u16 subver; const char *name; } bcm_uart_subver_table[] = { + { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 828f2f8d1..ddec437f8 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -89,7 +89,88 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(btintel_set_bdaddr); +void btintel_hw_error(struct hci_dev *hdev, u8 code) +{ + struct sk_buff *skb; + u8 type = 0x00; + + BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reset after hardware error failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Retrieving Intel exception info failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + + if (skb->len != 13) { + BT_ERR("%s: Exception info size mismatch", hdev->name); + kfree_skb(skb); + return; + } + + BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); + + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(btintel_hw_error); + +void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) +{ + const char *variant; + + switch (ver->fw_variant) { + case 0x06: + variant = "Bootloader"; + break; + case 0x23: + variant = "Firmware"; + break; + default: + return; + } + + BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name, + variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, + ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); +} +EXPORT_SYMBOL_GPL(btintel_version_info); + +int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, + const void *param) +{ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 252) ? 
252 : plen;
+
+		cmd_param[0] = fragment_type;
+		memcpy(cmd_param + 1, param, fragment_len);
+
+		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+				     cmd_param, HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
+
+		kfree_skb(skb);
+
+		plen -= fragment_len;
+		param += fragment_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 4bda6ab34..b278d1475 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,6 +73,11 @@ struct intel_secure_send_result {
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+			const void *param);
 
 #else
 
@@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
 	return -EOPNOTSUPP;
 }
 
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+				      u32 plen, const void *param)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 086f0ec89..27a9aac25 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -95,10 +95,10 @@ struct btmrvl_private {
 	struct btmrvl_device btmrvl_dev;
 	struct btmrvl_adapter *adapter;
 	struct btmrvl_thread main_thread;
-	int (*hw_host_to_card) (struct btmrvl_private *priv,
+	int (*hw_host_to_card)(struct btmrvl_private *priv,
 				u8 *payload, u16 nb);
-	int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
-	int (*hw_process_int_status) (struct btmrvl_private *priv);
+	int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+	int (*hw_process_int_status)(struct btmrvl_private *priv);
 	void (*firmware_dump)(struct btmrvl_private *priv);
 	spinlock_t driver_lock;		/* spinlock used by driver */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index ac1a43687..40c1aacb4 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
 		}
 	}
 
-	sdio_release_host(card->func);
-
 	/*
 	 * winner or not, with this test the FW synchronizes when the
 	 * module can continue its initialization
@@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
 		return -ETIMEDOUT;
 	}
 
+	sdio_release_host(card->func);
+
 	return 0;
 
 done:
@@ -1376,8 +1376,7 @@ done:
 	/* fw_dump_data will be freed in device coredump release function
 	 * after 5 min */
-	dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data,
-		      fw_dump_len, GFP_KERNEL);
+	dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
 	BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
 }
 
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644
index 000000000..eadbcd138
--- /dev/null
+++ b/drivers/bluetooth/btqca.c
@@ -0,0 +1,392 @@
+/*
+ * Bluetooth support for Qualcomm Atheros chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct rome_version *ver;
+	char cmd;
+	int err = 0;
+
+	BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+	cmd = EDL_PATCH_VER_REQ_CMD;
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+				&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+		       err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+		BT_ERR("%s: Version size mismatch len %d", hdev->name,
+		       skb->len);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_APP_VER_RES_EVT) {
+		BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+		       edl->cresp, edl->rtype);
+		err = -EIO;
+		goto out;
+	}
+
+	ver = (struct rome_version *)(edl->data);
+
+	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+	BT_DBG("%s: Patch :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+	BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+	BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The reported ROME chipset version is a combination of the SoC
+	 * and ROM versions: the upper two bytes are taken from soc_id
+	 * and the lower two bytes from rome_ver.
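+	 *
+	 * For example, a (hypothetical) soc_id of 0x00000044 and a
+	 * rome_ver of 0x0302 would combine to a version of 0x00440302.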
+ */ + *rome_version = (le32_to_cpu(ver->soc_id) << 16) | + (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + +out: + kfree_skb(skb); + + return err; +} + +static int rome_reset(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + BT_DBG("%s: ROME HCI_RESET", hdev->name); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Reset failed (%d)", hdev->name, err); + return err; + } + + kfree_skb(skb); + + return 0; +} + +static void rome_tlv_check_data(struct rome_config *config, + const struct firmware *fw) +{ + const u8 *data; + u32 type_len; + u16 tag_id, tag_len; + int idx, length; + struct tlv_type_hdr *tlv; + struct tlv_type_patch *tlv_patch; + struct tlv_type_nvm *tlv_nvm; + + tlv = (struct tlv_type_hdr *)fw->data; + + type_len = le32_to_cpu(tlv->type_len); + length = (type_len >> 8) & 0x00ffffff; + + BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); + BT_DBG("Length\t\t : %d bytes", length); + + switch (config->type) { + case TLV_TYPE_PATCH: + tlv_patch = (struct tlv_type_patch *)tlv->data; + BT_DBG("Total Length\t\t : %d bytes", + le32_to_cpu(tlv_patch->total_size)); + BT_DBG("Patch Data Length\t : %d bytes", + le32_to_cpu(tlv_patch->data_length)); + BT_DBG("Signing Format Version : 0x%x", + tlv_patch->format_version); + BT_DBG("Signature Algorithm\t : 0x%x", + tlv_patch->signature); + BT_DBG("Reserved\t\t : 0x%x", + le16_to_cpu(tlv_patch->reserved1)); + BT_DBG("Product ID\t\t : 0x%04x", + le16_to_cpu(tlv_patch->product_id)); + BT_DBG("Rom Build Version\t : 0x%04x", + le16_to_cpu(tlv_patch->rom_build)); + BT_DBG("Patch Version\t\t : 0x%04x", + le16_to_cpu(tlv_patch->patch_version)); + BT_DBG("Reserved\t\t : 0x%x", + le16_to_cpu(tlv_patch->reserved2)); + BT_DBG("Patch Entry Address\t : 0x%x", + le32_to_cpu(tlv_patch->entry)); + break; + + case TLV_TYPE_NVM: + idx = 0; + data = tlv->data; + while (idx < length) { + tlv_nvm = (struct tlv_type_nvm *)(data + idx); + + tag_id = le16_to_cpu(tlv_nvm->tag_id); + tag_len = le16_to_cpu(tlv_nvm->tag_len); + + /* Update NVM tags as needed */ + switch (tag_id) { + case EDL_TAG_ID_HCI: + /* HCI transport layer parameters + * enabling software inband sleep + * onto controller side. + */ + tlv_nvm->data[0] |= 0x80; + + /* UART Baud Rate */ + tlv_nvm->data[2] = config->user_baud_rate; + + break; + + case EDL_TAG_ID_DEEP_SLEEP: + /* Sleep enable mask + * enabling deep sleep feature on controller. 
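+				 * Only bit 0 is set here; the remaining
+				 * bits of the byte keep their values from
+				 * the firmware file.
+				 *
+				 * Each NVM record consists of a 2 byte
+				 * tag_id, a 2 byte tag_len, two reserved
+				 * 32-bit words and tag_len bytes of data,
+				 * which is what the index advance below
+				 * accounts for.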
+ */ + tlv_nvm->data[0] |= 0x01; + + break; + } + + idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len); + } + break; + + default: + BT_ERR("Unknown TLV type %d", config->type); + break; + } +} + +static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size, + const u8 *data) +{ + struct sk_buff *skb; + struct edl_event_hdr *edl; + struct tlv_seg_resp *tlv_resp; + u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2]; + int err = 0; + + BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size); + + cmd[0] = EDL_PATCH_TLV_REQ_CMD; + cmd[1] = seg_size; + memcpy(cmd + 2, data, seg_size); + + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, + HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err); + return err; + } + + if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) { + BT_ERR("%s: TLV response size mismatch", hdev->name); + err = -EILSEQ; + goto out; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl || !edl->data) { + BT_ERR("%s: TLV with no header or no data", hdev->name); + err = -EILSEQ; + goto out; + } + + tlv_resp = (struct tlv_seg_resp *)(edl->data); + + if (edl->cresp != EDL_CMD_REQ_RES_EVT || + edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) { + BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)", + hdev->name, edl->cresp, edl->rtype, tlv_resp->result); + err = -EIO; + } + +out: + kfree_skb(skb); + + return err; +} + +static int rome_tlv_download_request(struct hci_dev *hdev, + const struct firmware *fw) +{ + const u8 *buffer, *data; + int total_segment, remain_size; + int ret, i; + + if (!fw || !fw->data) + return -EINVAL; + + total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT; + remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT; + + BT_DBG("%s: Total segment num %d remain size %d total size %zu", + hdev->name, total_segment, remain_size, fw->size); + + data = fw->data; + for (i = 0; i < total_segment; i++) { + buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT; + ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT, + buffer); + if (ret < 0) + return -EIO; + } + + if (remain_size) { + buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT; + ret = rome_tlv_send_segment(hdev, total_segment, remain_size, + buffer); + if (ret < 0) + return -EIO; + } + + return 0; +} + +static int rome_download_firmware(struct hci_dev *hdev, + struct rome_config *config) +{ + const struct firmware *fw; + int ret; + + BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname); + + ret = reject_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + BT_ERR("%s: Failed to request file: %s (%d)", hdev->name, + config->fwname, ret); + return ret; + } + + rome_tlv_check_data(config, fw); + + ret = rome_tlv_download_request(hdev, fw); + if (ret) { + BT_ERR("%s: Failed to download file: %s (%d)", hdev->name, + config->fwname, ret); + } + + release_firmware(fw); + + return ret; +} + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 cmd[9]; + int err; + + cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD; + cmd[1] = 0x02; /* TAG ID */ + cmd[2] = sizeof(bdaddr_t); /* size */ + memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t)); + skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd, + HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Change address command failed (%d)", + hdev->name, err); + return err; + } + + kfree_skb(skb); + + return 0; +} 
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+	u32 rome_ver = 0;
+	struct rome_config config;
+	int err;
+
+	BT_DBG("%s: ROME setup on UART", hdev->name);
+
+	config.user_baud_rate = baudrate;
+
+	/* Get ROME version information */
+	err = rome_patch_ver_req(hdev, &rome_ver);
+	if (err < 0 || rome_ver == 0) {
+		BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+	/* Download rampatch file */
+	config.type = TLV_TYPE_PATCH;
+	snprintf(config.fwname, sizeof(config.fwname), "/*(DEBLOBBED)*/",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Download NVM configuration */
+	config.type = TLV_TYPE_NVM;
+	snprintf(config.fwname, sizeof(config.fwname), "/*(DEBLOBBED)*/",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Perform HCI reset */
+	err = rome_reset(hdev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644
index 000000000..65e994b96
--- /dev/null
+++ b/drivers/bluetooth/btqca.h
@@ -0,0 +1,135 @@
+/*
+ * Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define EDL_PATCH_CMD_OPCODE (0xFC00) +#define EDL_NVM_ACCESS_OPCODE (0xFC0B) +#define EDL_PATCH_CMD_LEN (1) +#define EDL_PATCH_VER_REQ_CMD (0x19) +#define EDL_PATCH_TLV_REQ_CMD (0x1E) +#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) +#define MAX_SIZE_PER_TLV_SEGMENT (243) + +#define EDL_CMD_REQ_RES_EVT (0x00) +#define EDL_PATCH_VER_RES_EVT (0x19) +#define EDL_APP_VER_RES_EVT (0x02) +#define EDL_TVL_DNLD_RES_EVT (0x04) +#define EDL_CMD_EXE_STATUS_EVT (0x00) +#define EDL_SET_BAUDRATE_RSP_EVT (0x92) +#define EDL_NVM_ACCESS_CODE_EVT (0x0B) + +#define EDL_TAG_ID_HCI (17) +#define EDL_TAG_ID_DEEP_SLEEP (27) + +enum qca_bardrate { + QCA_BAUDRATE_115200 = 0, + QCA_BAUDRATE_57600, + QCA_BAUDRATE_38400, + QCA_BAUDRATE_19200, + QCA_BAUDRATE_9600, + QCA_BAUDRATE_230400, + QCA_BAUDRATE_250000, + QCA_BAUDRATE_460800, + QCA_BAUDRATE_500000, + QCA_BAUDRATE_720000, + QCA_BAUDRATE_921600, + QCA_BAUDRATE_1000000, + QCA_BAUDRATE_1250000, + QCA_BAUDRATE_2000000, + QCA_BAUDRATE_3000000, + QCA_BAUDRATE_4000000, + QCA_BAUDRATE_1600000, + QCA_BAUDRATE_3200000, + QCA_BAUDRATE_3500000, + QCA_BAUDRATE_AUTO = 0xFE, + QCA_BAUDRATE_RESERVED +}; + +enum rome_tlv_type { + TLV_TYPE_PATCH = 1, + TLV_TYPE_NVM +}; + +struct rome_config { + u8 type; + char fwname[64]; + uint8_t user_baud_rate; +}; + +struct edl_event_hdr { + __u8 cresp; + __u8 rtype; + __u8 data[0]; +} __packed; + +struct rome_version { + __le32 product_id; + __le16 patch_ver; + __le16 rome_ver; + __le32 soc_id; +} __packed; + +struct tlv_seg_resp { + __u8 result; +} __packed; + +struct tlv_type_patch { + __le32 total_size; + __le32 data_length; + __u8 format_version; + __u8 signature; + __le16 reserved1; + __le16 product_id; + __le16 rom_build; + __le16 patch_version; + __le16 reserved2; + __le32 entry; +} __packed; + +struct tlv_type_nvm { + __le16 tag_id; + __le16 tag_len; + __le32 reserve1; + __le32 reserve2; + __u8 data[0]; +} __packed; + +struct tlv_type_hdr { + __le32 type_len; + __u8 data[0]; +} __packed; + +#if IS_ENABLED(CONFIG_BT_QCA) + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate); + +#else + +static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5771183a7..6d8c1e7ca 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth AMP device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP }, + /* Generic Bluetooth USB interface */ + { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_APPLE }, @@ -192,6 +195,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { 
USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, @@ -203,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, @@ -319,6 +324,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ + { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, + { } /* Terminating entry */ }; @@ -1575,7 +1583,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) /* fw_patch_num indicates the version of patch the device currently * have. If there is no patch data in the device, it is always 0x00. - * So, if it is other than 0x00, no need to patch the deivce again. + * So, if it is other than 0x00, no need to patch the device again. */ if (ver->fw_patch_num) { BT_INFO("%s: Intel device is already patched. patch num: %02x", @@ -1878,51 +1886,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return -EILSEQ; } -static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type, - u32 plen, const void *param) -{ - while (plen > 0) { - struct sk_buff *skb; - u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen; - - cmd_param[0] = fragment_type; - memcpy(cmd_param + 1, param, fragment_len); - - skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, - cmd_param, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - kfree_skb(skb); - - plen -= fragment_len; - param += fragment_len; - } - - return 0; -} - -static void btusb_intel_version_info(struct hci_dev *hdev, - struct intel_version *ver) -{ - const char *variant; - - switch (ver->fw_variant) { - case 0x06: - variant = "Bootloader"; - break; - case 0x23: - variant = "Firmware"; - break; - default: - return; - } - - BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name, - variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, - ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); -} - static int btusb_setup_intel_new(struct hci_dev *hdev) { static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01, @@ -1984,7 +1947,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) return -EINVAL; } - btusb_intel_version_info(hdev, ver); + btintel_version_info(hdev, ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies @@ -2104,7 +2067,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Start the firmware download transaction with the Init fragment * represented by the 128 bytes of CSS header. */ - err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data); + err = btintel_secure_send(hdev, 0x00, 128, fw->data); if (err < 0) { BT_ERR("%s: Failed to send firmware header (%d)", hdev->name, err); @@ -2114,7 +2077,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Send the 256 bytes of public key information from the firmware * as the PKey fragment. 
 */
-	err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware public key (%d)",
 		       hdev->name, err);
@@ -2124,7 +2087,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 	/* Send the 256 bytes of signature information from the firmware
 	 * as the Sign fragment.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware signature (%d)",
 		       hdev->name, err);
@@ -2139,7 +2102,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 
 		frag_len += sizeof(*cmd) + cmd->plen;
 
-		/* The paramter length of the secure send command requires
+		/* The parameter length of the secure send command requires
 		 * a 4 byte alignment. It happens so that the firmware file
 		 * contains proper Intel_NOP commands to align the fragments
 		 * as needed.
@@ -2148,8 +2111,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
 		 * firmware data buffer as a single Data fragment.
 		 */
 		if (!(frag_len % 4)) {
-			err = btusb_intel_secure_send(hdev, 0x01, frag_len,
-						      fw_ptr);
+			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
 			if (err < 0) {
 				BT_ERR("%s: Failed to send firmware data (%d)",
 				       hdev->name, err);
@@ -2291,39 +2253,6 @@ done:
 	return 0;
 }
 
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
-	struct sk_buff *skb;
-	u8 type = 0x00;
-
-	BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
-	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reset after hardware error failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-	kfree_skb(skb);
-
-	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-
-	if (skb->len != 13) {
-		BT_ERR("%s: Exception info size mismatch", hdev->name);
-		kfree_skb(skb);
-		return;
-	}
-
-	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
-	kfree_skb(skb);
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -2783,7 +2712,7 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_INTEL_NEW) {
 		hdev->send = btusb_send_frame_intel;
 		hdev->setup = btusb_setup_intel_new;
-		hdev->hw_error = btusb_hw_error_intel;
+		hdev->hw_error = btintel_hw_error;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 	}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 78e10f0c6..84135c54e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
 	int i;
 
 	printk(KERN_INFO "Bluetooth: Nokia control data =");
-	for (i = 0; i < skb->len; i++) {
+	for (i = 0; i < skb->len; i++)
 		printk(" %02x", skb->data[i]);
-	}
+
 	printk("\n");
 
 	/* transition to active state */
@@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 		break;
 	default:
 		return -EILSEQ;
-	};
+	}
 
 	nsh.zero = 0;
 	nsh.len = skb->len;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 58d619492..de00082b8 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -25,6 +25,12 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -32,11
+38,37 @@ #include "btbcm.h" #include "hci_uart.h" +struct bcm_device { + struct list_head list; + + struct platform_device *pdev; + + const char *name; + struct gpio_desc *device_wakeup; + struct gpio_desc *shutdown; + + struct clk *clk; + bool clk_enabled; + + u32 init_speed; + +#ifdef CONFIG_PM_SLEEP + struct hci_uart *hu; + bool is_suspended; /* suspend/resume flag */ +#endif +}; + struct bcm_data { - struct sk_buff *rx_skb; - struct sk_buff_head txq; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + + struct bcm_device *dev; }; +/* List of BCM BT UART devices */ +static DEFINE_SPINLOCK(bcm_device_lock); +static LIST_HEAD(bcm_device_list); + static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; @@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) return 0; } +/* bcm_device_exists should be protected by bcm_device_lock */ +static bool bcm_device_exists(struct bcm_device *device) +{ + struct list_head *p; + + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + if (device == dev) + return true; + } + + return false; +} + +static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) +{ + if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) + clk_enable(dev->clk); + + gpiod_set_value(dev->shutdown, powered); + gpiod_set_value(dev->device_wakeup, powered); + + if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled) + clk_disable(dev->clk); + + dev->clk_enabled = powered; + + return 0; +} + static int bcm_open(struct hci_uart *hu) { struct bcm_data *bcm; + struct list_head *p; BT_DBG("hu %p", hu); @@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu) skb_queue_head_init(&bcm->txq); hu->priv = bcm; + + spin_lock(&bcm_device_lock); + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + /* Retrieve saved bcm_device based on parent of the + * platform device (saved during device probe) and + * parent of tty device used by hci_uart + */ + if (hu->tty->dev->parent == dev->pdev->dev.parent) { + bcm->dev = dev; + hu->init_speed = dev->init_speed; +#ifdef CONFIG_PM_SLEEP + dev->hu = hu; +#endif + break; + } + } + + if (bcm->dev) + bcm_gpio_set_power(bcm->dev, true); + + spin_unlock(&bcm_device_lock); + return 0; } @@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu) BT_DBG("hu %p", hu); + /* Protect bcm->dev against removal of the device or driver */ + spin_lock(&bcm_device_lock); + if (bcm_device_exists(bcm->dev)) { + bcm_gpio_set_power(bcm->dev, false); +#ifdef CONFIG_PM_SLEEP + bcm->dev->hu = NULL; +#endif + } + spin_unlock(&bcm_device_lock); + skb_queue_purge(&bcm->txq); kfree_skb(bcm->rx_skb); kfree(bcm); @@ -232,6 +330,204 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu) return skb_dequeue(&bcm->txq); } +#ifdef CONFIG_PM_SLEEP +/* Platform suspend callback */ +static int bcm_suspend(struct device *dev) +{ + struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev)); + + BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended); + + spin_lock(&bcm_device_lock); + + if (!bdev->hu) + goto unlock; + + if (!bdev->is_suspended) { + hci_uart_set_flow_control(bdev->hu, true); + + /* Once this callback returns, driver suspends BT via GPIO */ + bdev->is_suspended = true; + } + + /* Suspend the device */ + if (bdev->device_wakeup) { + gpiod_set_value(bdev->device_wakeup, false); + BT_DBG("suspend, delaying 15 ms"); + mdelay(15); + } + +unlock: + 
spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	spin_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		BT_DBG("resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this callback executes, the device has woken up already */
+	if (bdev->is_suspended) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+unlock:
+	spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+	struct bcm_device *dev = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_uart_serialbus *sb;
+
+		sb = &ares->data.uart_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+			dev->init_speed = sb->default_baud_rate;
+	}
+
+	/* Always tell the ACPI core to skip this resource */
+	return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	struct platform_device *pdev = dev->pdev;
+	const struct acpi_device_id *id;
+	struct acpi_device *adev;
+	LIST_HEAD(resources);
+	int ret;
+
+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	/* Retrieve GPIO data */
+	dev->name = dev_name(&pdev->dev);
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+					acpi_bcm_default_gpios);
+	if (ret)
+		return ret;
+
+	dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+	dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+						     "device-wakeup",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(dev->device_wakeup))
+		return PTR_ERR(dev->device_wakeup);
+
+	dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(dev->shutdown))
+		return PTR_ERR(dev->shutdown);
+
+	/* Make sure at least one of the GPIOs is defined and that
+	 * a name is specified for this instance
+	 */
+	if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+		dev_err(&pdev->dev, "invalid platform data\n");
+		return -EINVAL;
+	}
+
+	/* Retrieve UART ACPI info */
+	adev = ACPI_COMPANION(&dev->pdev->dev);
+	if (!adev)
+		return 0;
+
+	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+	return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+	struct bcm_device *dev;
+	struct acpi_device_id *pdata = pdev->dev.platform_data;
+	int ret;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		ret = bcm_acpi_probe(dev);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		dev->name = pdata->id;
+	} else {
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+	/* Place this instance on the device list */
+	spin_lock(&bcm_device_lock);
+	list_add_tail(&dev->list, &bcm_device_list);
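+	/* The controller is powered down below until an hci_uart user
+	 * attaches to this UART and bcm_open() powers it back up.
+	 */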
+	spin_unlock(&bcm_device_lock);
+
+	bcm_gpio_set_power(dev, false);
+
+	return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+	struct bcm_device *dev = platform_get_drvdata(pdev);
+
+	spin_lock(&bcm_device_lock);
+	list_del(&dev->list);
+	spin_unlock(&bcm_device_lock);
+
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+	dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+	return 0;
+}
+
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
@@ -247,12 +543,38 @@ static const struct hci_uart_proto bcm_proto = {
 	.dequeue	= bcm_dequeue,
 };
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+	{ "BCM2E39", 0 },
+	{ "BCM2E67", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+	.probe = bcm_probe,
+	.remove = bcm_remove,
+	.driver = {
+		.name = "hci_bcm",
+		.acpi_match_table = ACPI_PTR(bcm_acpi_match),
+		.pm = &bcm_pm_ops,
+	},
+};
+
 int __init bcm_init(void)
 {
+	platform_driver_register(&bcm_driver);
+
 	return hci_uart_register_proto(&bcm_proto);
 }
 
 int __exit bcm_deinit(void)
 {
+	platform_driver_unregister(&bcm_driver);
+
 	return hci_uart_unregister_proto(&bcm_proto);
 }
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 57faddc53..eec3f28e4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -223,8 +223,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
 		switch ((&pkts[i])->lsize) {
 		case 0:
 			/* No variable data length */
-			(&pkts[i])->recv(hdev, skb);
-			skb = NULL;
+			dlen = 0;
 			break;
 		case 1:
 			/* Single octet variable length */
@@ -252,6 +251,12 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
 				kfree_skb(skb);
 				return ERR_PTR(-EILSEQ);
 			}
+
+			if (!dlen) {
+				/* No more data, complete frame */
+				(&pkts[i])->recv(hdev, skb);
+				skb = NULL;
+			}
 		} else {
 			/* Complete frame */
 			(&pkts[i])->recv(hdev, skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 3455cecc9..b35b238a0 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -75,7 +75,7 @@ struct h5 {
 	size_t			rx_pending;	/* Expecting more bytes */
 	u8			rx_ack;		/* Last ack number received */
 
-	int			(*rx_func) (struct hci_uart *hu, u8 c);
+	int			(*rx_func)(struct hci_uart *hu, u8 c);
 
 	struct timer_list	timer;		/* Retransmission timer */
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5dd07bf05..3a4e7c63f 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -24,8 +24,864 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER	0
+#define STATE_DOWNLOADING	1
+#define STATE_FIRMWARE_LOADED	2
+#define STATE_FIRMWARE_FAILED	3
+#define STATE_BOOTING		4
+
+struct intel_device {
+	struct list_head list;
+	struct platform_device *pdev;
+	struct gpio_desc *reset;
+};
+
+static LIST_HEAD(intel_device_list);
+static DEFINE_SPINLOCK(intel_device_list_lock);
+
+struct intel_data {
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	unsigned long flags;
+};
+
+static u8 intel_convert_speed(unsigned int speed)
+{
+	switch (speed) {
+	case 9600:
+		return 0x00;
+	case 19200:
+		return 0x01;
+	case 38400:
+		return 0x02;
+	case 57600:
+		return 0x03;
+	case 115200:
+		return 0x04;
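+	/* The remaining codes are vendor assigned and are not strictly
+	 * ordered by baud rate (for instance 1843200 maps to 0x08 while
+	 * 2000000 maps to 0x0a).
+	 */
+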
case 230400: + return 0x05; + case 460800: + return 0x06; + case 921600: + return 0x07; + case 1843200: + return 0x08; + case 3250000: + return 0x09; + case 2000000: + return 0x0a; + case 3000000: + return 0x0b; + default: + return 0xff; + } +} + +static int intel_wait_booting(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + int err; + + err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); + + if (err == 1) { + BT_ERR("%s: Device boot interrupted", hu->hdev->name); + return -EINTR; + } + + if (err) { + BT_ERR("%s: Device boot timeout", hu->hdev->name); + return -ETIMEDOUT; + } + + return err; +} + +static int intel_set_power(struct hci_uart *hu, bool powered) +{ + struct list_head *p; + int err = -ENODEV; + + spin_lock(&intel_device_list_lock); + + list_for_each(p, &intel_device_list) { + struct intel_device *idev = list_entry(p, struct intel_device, + list); + + /* tty device and pdev device should share the same parent + * which is the UART port. + */ + if (hu->tty->dev->parent != idev->pdev->dev.parent) + continue; + + if (!idev->reset) { + err = -ENOTSUPP; + break; + } + + BT_INFO("hu %p, Switching compatible pm device (%s) to %u", + hu, dev_name(&idev->pdev->dev), powered); + + gpiod_set_value(idev->reset, powered); + } + + spin_unlock(&intel_device_list_lock); + + return err; +} + +static int intel_open(struct hci_uart *hu) +{ + struct intel_data *intel; + + BT_DBG("hu %p", hu); + + intel = kzalloc(sizeof(*intel), GFP_KERNEL); + if (!intel) + return -ENOMEM; + + skb_queue_head_init(&intel->txq); + + hu->priv = intel; + + if (!intel_set_power(hu, true)) + set_bit(STATE_BOOTING, &intel->flags); + + return 0; +} + +static int intel_close(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + intel_set_power(hu, false); + + skb_queue_purge(&intel->txq); + kfree_skb(intel->rx_skb); + kfree(intel); + + hu->priv = NULL; + return 0; +} + +static int intel_flush(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&intel->txq); + + return 0; +} + +static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + *skb_put(skb, 1) = 0x00; + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + struct intel_data *intel = hu->priv; + struct hci_dev *hdev = hu->hdev; + u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 }; + struct sk_buff *skb; + int err; + + /* This can be the first command sent to the chip, check + * that the controller is ready. 
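+	 * intel_wait_booting() returns 0 on success, -EINTR when the
+	 * wait is interrupted by a signal and -ETIMEDOUT when no boot
+	 * notification arrives; only the timeout case is tolerated
+	 * below.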
+	 */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+
+	speed_cmd[3] = intel_convert_speed(speed);
+	if (speed_cmd[3] == 0xff) {
+		BT_ERR("%s: Unsupported speed", hdev->name);
+		return -EINVAL;
+	}
+
+	/* Device will not accept speed change if Intel version has not been
+	 * previously requested.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
+	if (!skb) {
+		BT_ERR("%s: Failed to allocate memory for baudrate packet",
+		       hdev->name);
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	hci_uart_set_flow_control(hu, true);
+
+	skb_queue_tail(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* wait 100ms to change baudrate on controller side */
+	msleep(100);
+
+	hci_uart_set_baudrate(hu, speed);
+	hci_uart_set_flow_control(hu, false);
+
+	return 0;
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+					  0x00, 0x08, 0x04, 0x00 };
+	struct intel_data *intel = hu->priv;
+	struct hci_dev *hdev = hu->hdev;
+	struct sk_buff *skb;
+	struct intel_version *ver;
+	struct intel_boot_params *params;
+	const struct firmware *fw;
+	const u8 *fw_ptr;
+	char fwname[64];
+	u32 frag_len;
+	ktime_t calltime, delta, rettime;
+	unsigned long long duration;
+	unsigned int init_speed, oper_speed;
+	int speed_change = 0;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+	calltime = ktime_get();
+
+	if (hu->init_speed)
+		init_speed = hu->init_speed;
+	else
+		init_speed = hu->proto->init_speed;
+
+	if (hu->oper_speed)
+		oper_speed = hu->oper_speed;
+	else
+		oper_speed = hu->proto->oper_speed;
+
+	if (oper_speed && init_speed && oper_speed != init_speed)
+		speed_change = 1;
+
+	/* Check that the controller is ready */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	set_bit(STATE_BOOTLOADER, &intel->flags);
+
+	/* Read the Intel version information to determine if the device
+	 * is in bootloader mode or if it already has operational firmware
+	 * loaded.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*ver)) {
+		BT_ERR("%s: Intel version event size mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	ver = (struct intel_version *)skb->data;
+	if (ver->status) {
+		BT_ERR("%s: Intel version command failure (%02x)",
+		       hdev->name, ver->status);
+		err = -bt_to_errno(ver->status);
+		kfree_skb(skb);
+		return err;
+	}
+
+	/* The hardware platform number has a fixed value of 0x37 and
+	 * for now only this single value is accepted.
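+	 * Rejecting anything else up front avoids running the
+	 * bootloader protocol below against hardware that this driver
+	 * has never been validated on.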
+ */ + if (ver->hw_platform != 0x37) { + BT_ERR("%s: Unsupported Intel hardware platform (%u)", + hdev->name, ver->hw_platform); + kfree_skb(skb); + return -EINVAL; + } + + /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is + * supported by this firmware loading method. This check has been + * put in place to ensure correct forward compatibility options + * when newer hardware variants come along. + */ + if (ver->hw_variant != 0x0b) { + BT_ERR("%s: Unsupported Intel hardware variant (%u)", + hdev->name, ver->hw_variant); + kfree_skb(skb); + return -EINVAL; + } + + btintel_version_info(hdev, ver); + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->fw_variant == 0x23) { + kfree_skb(skb); + clear_bit(STATE_BOOTLOADER, &intel->flags); + btintel_check_bdaddr(hdev); + return 0; + } + + /* If the device is not in bootloader mode, then the only possible + * choice is to return an error and abort the device initialization. + */ + if (ver->fw_variant != 0x06) { + BT_ERR("%s: Unsupported Intel firmware variant (%u)", + hdev->name, ver->fw_variant); + kfree_skb(skb); + return -ENODEV; + } + + kfree_skb(skb); + + /* Read the secure boot parameters to identify the operating + * details of the bootloader. + */ + skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading Intel boot parameters failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*params)) { + BT_ERR("%s: Intel boot parameters size mismatch", hdev->name); + kfree_skb(skb); + return -EILSEQ; + } + + params = (struct intel_boot_params *)skb->data; + if (params->status) { + BT_ERR("%s: Intel boot parameters command failure (%02x)", + hdev->name, params->status); + err = -bt_to_errno(params->status); + kfree_skb(skb); + return err; + } + + BT_INFO("%s: Device revision is %u", hdev->name, + le16_to_cpu(params->dev_revid)); + + BT_INFO("%s: Secure boot is %s", hdev->name, + params->secure_boot ? "enabled" : "disabled"); + + BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name, + params->min_fw_build_nn, params->min_fw_build_cw, + 2000 + params->min_fw_build_yy); + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (params->limited_cce != 0x00) { + BT_ERR("%s: Unsupported Intel firmware loading method (%u)", + hdev->name, params->limited_cce); + kfree_skb(skb); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(¶ms->otp_bdaddr, BDADDR_ANY)) { + BT_INFO("%s: No device address configured", hdev->name); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + /* With this Intel bootloader only the hardware variant and device + * revision information are used to select the right firmware. 
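+	 *
+	 * In this linux-libre tree the firmware name string has been
+	 * deblobbed and the request goes through reject_firmware(),
+	 * which refuses to load non-free firmware blobs.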
+	 *
+	 * Currently this bootloader support is limited to hardware variant
+	 * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+	 */
+	snprintf(fwname, sizeof(fwname), "/*(DEBLOBBED)*/",
+		 le16_to_cpu(params->dev_revid));
+
+	err = reject_firmware(&fw, fwname, &hdev->dev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to load Intel firmware file (%d)",
+		       hdev->name, err);
+		kfree_skb(skb);
+		return err;
+	}
+
+	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+	kfree_skb(skb);
+
+	if (fw->size < 644) {
+		BT_ERR("%s: Invalid size of firmware file (%zu)",
+		       hdev->name, fw->size);
+		err = -EBADF;
+		goto done;
+	}
+
+	set_bit(STATE_DOWNLOADING, &intel->flags);
+
+	/* Start the firmware download transaction with the Init fragment
+	 * represented by the 128 bytes of CSS header.
+	 */
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware header (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of public key information from the firmware
+	 * as the PKey fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware public key (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of signature information from the firmware
+	 * as the Sign fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware signature (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	fw_ptr = fw->data + 644;
+	frag_len = 0;
+
+	while (fw_ptr - fw->data < fw->size) {
+		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+		frag_len += sizeof(*cmd) + cmd->plen;
+
+		BT_DBG("%s: patching %td/%zu", hdev->name,
+		       (fw_ptr - fw->data), fw->size);
+
+		/* The parameter length of the secure send command requires
+		 * a 4 byte alignment. It happens so that the firmware file
+		 * contains proper Intel_NOP commands to align the fragments
+		 * as needed.
+		 *
+		 * Send set of commands with 4 byte alignment from the
+		 * firmware data buffer as a single Data fragment.
+		 */
+		if (frag_len % 4)
+			continue;
+
+		/* Send each command from the firmware data buffer as
+		 * a single Data fragment.
+		 */
+		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+		if (err < 0) {
+			BT_ERR("%s: Failed to send firmware data (%d)",
+			       hdev->name, err);
+			goto done;
+		}
+
+		fw_ptr += frag_len;
+		frag_len = 0;
+	}
+
+	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+	BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+	/* Before switching the device into operational mode and with that
+	 * booting the loaded firmware, wait for the bootloader notification
+	 * that all fragments have been successfully received.
+	 *
+	 * When the event processing receives the notification, then the
+	 * STATE_DOWNLOADING flag will be cleared.
+	 *
+	 * The firmware loading should not take longer than 5 seconds
+	 * and thus just timeout if that happens and fail the setup
+	 * of this device.
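+	 *
+	 * A return value of 1 from wait_on_bit_timeout() below means
+	 * the wait was interrupted by a signal; any other non-zero
+	 * return is treated as a timeout.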
+ */ + err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); + if (err == 1) { + BT_ERR("%s: Firmware loading interrupted", hdev->name); + err = -EINTR; + goto done; + } + + if (err) { + BT_ERR("%s: Firmware loading timeout", hdev->name); + err = -ETIMEDOUT; + goto done; + } + + if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) { + BT_ERR("%s: Firmware loading failed", hdev->name); + err = -ENOEXEC; + goto done; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration); + +done: + release_firmware(fw); + + if (err < 0) + return err; + + /* We need to restore the default speed before Intel reset */ + if (speed_change) { + err = intel_set_baudrate(hu, init_speed); + if (err) + return err; + } + + calltime = ktime_get(); + + set_bit(STATE_BOOTING, &intel->flags); + + skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. However if that happens, then just fail the setup + * since something went wrong. + */ + BT_INFO("%s: Waiting for device to boot", hdev->name); + + err = intel_wait_booting(hu); + if (err) + return err; + + clear_bit(STATE_BOOTING, &intel->flags); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + if (speed_change) { + err = intel_set_baudrate(hu, oper_speed); + if (err) + return err; + } + + BT_INFO("%s: Setup complete", hdev->name); + + clear_bit(STATE_BOOTLOADER, &intel->flags); + + return 0; +} + +static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct intel_data *intel = hu->priv; + struct hci_event_hdr *hdr; + + if (!test_bit(STATE_BOOTLOADER, &intel->flags) && + !test_bit(STATE_BOOTING, &intel->flags)) + goto recv; + + hdr = (void *)skb->data; + + /* When the firmware loading completes the device sends + * out a vendor specific event indicating the result of + * the firmware loading. + */ + if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 && + skb->data[2] == 0x06) { + if (skb->data[3] != 0x00) + set_bit(STATE_FIRMWARE_FAILED, &intel->flags); + + if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) && + test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&intel->flags, STATE_DOWNLOADING); + } + + /* When switching to the operational firmware the device + * sends a vendor specific event indicating that the bootup + * completed. 
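+	 *
+	 * In both cases smp_mb__after_atomic() orders the flag
+	 * updates before the wake_up_bit() call, so the woken
+	 * waiter observes the new state.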
+ */ + } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 && + skb->data[2] == 0x02) { + if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&intel->flags, STATE_BOOTING); + } + } +recv: + return hci_recv_frame(hdev, skb); +} + +static const struct h4_recv_pkt intel_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = intel_recv_event }, +}; + +static int intel_recv(struct hci_uart *hu, const void *data, int count) +{ + struct intel_data *intel = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, + intel_recv_pkts, + ARRAY_SIZE(intel_recv_pkts)); + if (IS_ERR(intel->rx_skb)) { + int err = PTR_ERR(intel->rx_skb); + BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); + intel->rx_skb = NULL; + return err; + } + + return count; +} + +static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + skb_queue_tail(&intel->txq, skb); + + return 0; +} + +static struct sk_buff *intel_dequeue(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&intel->txq); + if (!skb) + return skb; + + if (test_bit(STATE_BOOTLOADER, &intel->flags) && + (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. + */ + if (opcode == 0xfc01) + inject_cmd_complete(hu->hdev, opcode); + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + return skb; +} + +static const struct hci_uart_proto intel_proto = { + .id = HCI_UART_INTEL, + .name = "Intel", + .init_speed = 115200, + .oper_speed = 3000000, + .open = intel_open, + .close = intel_close, + .flush = intel_flush, + .setup = intel_setup, + .set_baudrate = intel_set_baudrate, + .recv = intel_recv, + .enqueue = intel_enqueue, + .dequeue = intel_dequeue, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id intel_acpi_match[] = { + { "INT33E1", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, intel_acpi_match); + +static int intel_acpi_probe(struct intel_device *idev) +{ + const struct acpi_device_id *id; + + id = acpi_match_device(intel_acpi_match, &idev->pdev->dev); + if (!id) + return -ENODEV; + + return 0; +} +#else +static int intel_acpi_probe(struct intel_device *idev) +{ + return -ENODEV; +} +#endif + +static int intel_probe(struct platform_device *pdev) +{ + struct intel_device *idev; + + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); + if (!idev) + return -ENOMEM; + + idev->pdev = pdev; + + if (ACPI_HANDLE(&pdev->dev)) { + int err = intel_acpi_probe(idev); + if (err) + return err; + } else { + return -ENODEV; + } + + idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(idev->reset)) { + dev_err(&pdev->dev, "Unable to retrieve gpio\n"); + return PTR_ERR(idev->reset); + } + + platform_set_drvdata(pdev, idev); + + /* Place this instance on the device list */ + spin_lock(&intel_device_list_lock); + list_add_tail(&idev->list, &intel_device_list); + spin_unlock(&intel_device_list_lock); + + dev_info(&pdev->dev, 
"registered.\n"); + + return 0; +} + +static int intel_remove(struct platform_device *pdev) +{ + struct intel_device *idev = platform_get_drvdata(pdev); + + spin_lock(&intel_device_list_lock); + list_del(&idev->list); + spin_unlock(&intel_device_list_lock); + + dev_info(&pdev->dev, "unregistered.\n"); + + return 0; +} + +static struct platform_driver intel_driver = { + .probe = intel_probe, + .remove = intel_remove, + .driver = { + .name = "hci_intel", + .acpi_match_table = ACPI_PTR(intel_acpi_match), + }, +}; + +int __init intel_init(void) +{ + platform_driver_register(&intel_driver); + + return hci_uart_register_proto(&intel_proto); +} + +int __exit intel_deinit(void) +{ + platform_driver_unregister(&intel_driver); + + return hci_uart_unregister_proto(&intel_proto); +} diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 177dd69fd..0d5a05a7c 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -770,7 +770,7 @@ static int __init hci_uart_init(void) /* Register the tty discipline */ - memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc)); + memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc)); hci_uart_ldisc.magic = TTY_LDISC_MAGIC; hci_uart_ldisc.name = "n_hci"; hci_uart_ldisc.open = hci_uart_tty_open; @@ -804,9 +804,15 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_3WIRE h5_init(); #endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_init(); +#endif #ifdef CONFIG_BT_HCIUART_BCM bcm_init(); #endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_init(); +#endif return 0; } @@ -830,9 +836,15 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_3WIRE h5_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_deinit(); +#endif #ifdef CONFIG_BT_HCIUART_BCM bcm_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_deinit(); +#endif /* Release tty registration of line discipline */ err = tty_unregister_ldisc(N_HCI); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c new file mode 100644 index 000000000..6b9b91267 --- /dev/null +++ b/drivers/bluetooth/hci_qca.c @@ -0,0 +1,969 @@ +/* + * Bluetooth Software UART Qualcomm protocol + * + * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management + * protocol extension to H4. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. + * + * Acknowledgements: + * This file is based on hci_ll.c, which was... + * Written by Ohad Ben-Cohen + * which was in turn based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "hci_uart.h" +#include "btqca.h" + +/* HCI_IBS protocol messages */ +#define HCI_IBS_SLEEP_IND 0xFE +#define HCI_IBS_WAKE_IND 0xFD +#define HCI_IBS_WAKE_ACK 0xFC +#define HCI_MAX_IBS_SIZE 10 + +/* Controller states */ +#define STATE_IN_BAND_SLEEP_ENABLED 1 + +#define IBS_WAKE_RETRANS_TIMEOUT_MS 100 +#define IBS_TX_IDLE_TIMEOUT_MS 2000 +#define BAUDRATE_SETTLE_TIMEOUT_MS 300 + +/* HCI_IBS transmit side sleep protocol states */ +enum tx_ibs_states { + HCI_IBS_TX_ASLEEP, + HCI_IBS_TX_WAKING, + HCI_IBS_TX_AWAKE, +}; + +/* HCI_IBS receive side sleep protocol states */ +enum rx_states { + HCI_IBS_RX_ASLEEP, + HCI_IBS_RX_AWAKE, +}; + +/* HCI_IBS transmit and receive side clock state vote */ +enum hci_ibs_clock_state_vote { + HCI_IBS_VOTE_STATS_UPDATE, + HCI_IBS_TX_VOTE_CLOCK_ON, + HCI_IBS_TX_VOTE_CLOCK_OFF, + HCI_IBS_RX_VOTE_CLOCK_ON, + HCI_IBS_RX_VOTE_CLOCK_OFF, +}; + +struct qca_data { + struct hci_uart *hu; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */ + spinlock_t hci_ibs_lock; /* HCI_IBS state lock */ + u8 tx_ibs_state; /* HCI_IBS transmit side power state */ + u8 rx_ibs_state; /* HCI_IBS receive side power state */ + u32 tx_vote; /* Clock must be on for TX */ + u32 rx_vote; /* Clock must be on for RX */ + struct timer_list tx_idle_timer; + u32 tx_idle_delay; + struct timer_list wake_retrans_timer; + u32 wake_retrans; + struct workqueue_struct *workqueue; + struct work_struct ws_awake_rx; + struct work_struct ws_awake_device; + struct work_struct ws_rx_vote_off; + struct work_struct ws_tx_vote_off; + unsigned long flags; + + /* For debugging purposes */ + u64 ibs_sent_wacks; + u64 ibs_sent_slps; + u64 ibs_sent_wakes; + u64 ibs_recv_wacks; + u64 ibs_recv_slps; + u64 ibs_recv_wakes; + u64 vote_last_jif; + u32 vote_on_ms; + u32 vote_off_ms; + u64 tx_votes_on; + u64 rx_votes_on; + u64 tx_votes_off; + u64 rx_votes_off; + u64 votes_on; + u64 votes_off; +}; + +static void __serial_clock_on(struct tty_struct *tty) +{ + /* TODO: Some chipsets require the UART clock to be enabled manually + * on the client side to save power. + * Put your code to control the UART clock here if needed + */ +} + +static void __serial_clock_off(struct tty_struct *tty) +{ + /* TODO: Some chipsets require the UART clock to be disabled manually + * on the client side to save power.
+ * Put your code to control the UART clock off here if needed + */ +} + +/* serial_clock_vote needs to be called with the ibs lock held */ +static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + unsigned int diff; + + bool old_vote = (qca->tx_vote | qca->rx_vote); + bool new_vote; + + switch (vote) { + case HCI_IBS_VOTE_STATS_UPDATE: + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (old_vote) + qca->vote_off_ms += diff; + else + qca->vote_on_ms += diff; + return; + + case HCI_IBS_TX_VOTE_CLOCK_ON: + qca->tx_vote = true; + qca->tx_votes_on++; + new_vote = true; + break; + + case HCI_IBS_RX_VOTE_CLOCK_ON: + qca->rx_vote = true; + qca->rx_votes_on++; + new_vote = true; + break; + + case HCI_IBS_TX_VOTE_CLOCK_OFF: + qca->tx_vote = false; + qca->tx_votes_off++; + new_vote = qca->rx_vote | qca->tx_vote; + break; + + case HCI_IBS_RX_VOTE_CLOCK_OFF: + qca->rx_vote = false; + qca->rx_votes_off++; + new_vote = qca->rx_vote | qca->tx_vote; + break; + + default: + BT_ERR("Voting irregularity"); + return; + } + + if (new_vote != old_vote) { + if (new_vote) + __serial_clock_on(hu->tty); + else + __serial_clock_off(hu->tty); + + BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false", + vote ? "true" : "false"); + + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (new_vote) { + qca->votes_on++; + qca->vote_off_ms += diff; + } else { + qca->votes_off++; + qca->vote_on_ms += diff; + } + qca->vote_last_jif = jiffies; + } +} + +/* Builds and sends an HCI_IBS command packet. + * These are very simple packets with only 1 cmd byte. + */ +static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu) +{ + int err = 0; + struct sk_buff *skb = NULL; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd); + + skb = bt_skb_alloc(1, GFP_ATOMIC); + if (!skb) { + BT_ERR("Failed to allocate memory for HCI_IBS packet"); + return -ENOMEM; + } + + /* Assign HCI_IBS type */ + *skb_put(skb, 1) = cmd; + + skb_queue_tail(&qca->txq, skb); + + return err; +} + +static void qca_wq_awake_device(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_device); + struct hci_uart *hu = qca->hu; + unsigned long retrans_delay; + + BT_DBG("hu %p wq awake device", hu); + + /* Vote for serial clock */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); + + spin_lock(&qca->hci_ibs_lock); + + /* Send wake indication to device */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) + BT_ERR("Failed to send WAKE to device"); + + qca->ibs_sent_wakes++; + + /* Start retransmit timer */ + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + + spin_unlock(&qca->hci_ibs_lock); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_awake_rx(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_rx); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p wq awake rx", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); + + spin_lock(&qca->hci_ibs_lock); + qca->rx_ibs_state = HCI_IBS_RX_AWAKE; + + /* Always acknowledge device wake up, + * sending an IBS message doesn't count as TX ON. + */
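The voting scheme above keeps the UART clock on while either direction still needs it: each side votes independently, the clock follows the OR of the two votes, and the on/off durations are accumulated for the statistics counters. A minimal userspace model of that rule (illustration only, not part of the patch; all names here are invented):

  #include <stdbool.h>
  #include <stdio.h>

  static bool tx_vote, rx_vote;

  /* mirrors new_vote = qca->rx_vote | qca->tx_vote in serial_clock_vote() */
  static bool clock_needed(void)
  {
  	return tx_vote || rx_vote;
  }

  int main(void)
  {
  	tx_vote = true;					/* TX votes the clock on */
  	printf("clock on: %d\n", clock_needed());	/* 1 */
  	rx_vote = true;					/* RX votes too */
  	tx_vote = false;				/* TX done, RX still holds a vote */
  	printf("clock on: %d\n", clock_needed());	/* 1 */
  	rx_vote = false;				/* last vote dropped */
  	printf("clock on: %d\n", clock_needed());	/* 0 */
  	return 0;
  }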
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) + BT_ERR("Failed to acknowledge device wake up"); + + qca->ibs_sent_wacks++; + + spin_unlock(&qca->hci_ibs_lock); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_rx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p rx clock vote off", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu); +} + +static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_tx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p tx clock vote off", hu); + + /* Run HCI tx handling unlocked */ + hci_uart_tx_wakeup(hu); + + /* Now that the message is queued to the tty driver, vote for tty clocks off. + * It is up to the tty driver to pend the clocks off until tx done. + */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); +} + +static void hci_ibs_tx_idle_timeout(unsigned long arg) +{ + struct hci_uart *hu = (struct hci_uart *)arg; + struct qca_data *qca = hu->priv; + unsigned long flags; + + BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* TX_IDLE, go to SLEEP */ + if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) { + BT_ERR("Failed to send SLEEP to device"); + break; + } + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->ibs_sent_slps++; + queue_work(qca->workqueue, &qca->ws_tx_vote_off); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_WAKING: + /* Fall through */ + + default: + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +static void hci_ibs_wake_retrans_timeout(unsigned long arg) +{ + struct hci_uart *hu = (struct hci_uart *)arg; + struct qca_data *qca = hu->priv; + unsigned long flags, retrans_delay; + unsigned long retransmit = 0; + + BT_DBG("hu %p wake retransmit timeout in %d state", + hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_WAKING: + /* No WAKE_ACK, retransmit WAKE */ + retransmit = 1; + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wakes++; + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_AWAKE: + /* Fall through */ + + default: + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + if (retransmit) + hci_uart_tx_wakeup(hu); +} + +/* Initialize protocol */ +static int qca_open(struct hci_uart *hu) +{ + struct qca_data *qca; + + BT_DBG("hu %p qca_open", hu); + + qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC); + if (!qca) + return -ENOMEM; + + skb_queue_head_init(&qca->txq); + skb_queue_head_init(&qca->tx_wait_q); + spin_lock_init(&qca->hci_ibs_lock); + qca->workqueue = create_singlethread_workqueue("qca_wq"); + if (!qca->workqueue) { + BT_ERR("QCA Workqueue not initialized properly"); + kfree(qca); + return -ENOMEM; + } + + INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); + INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); + 
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); + INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); + + qca->hu = hu; + + /* Assume we start with both sides asleep -- extra wakes OK */ + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + + /* clocks actually on, but we start votes off */ + qca->tx_vote = false; + qca->rx_vote = false; + qca->flags = 0; + + qca->ibs_sent_wacks = 0; + qca->ibs_sent_slps = 0; + qca->ibs_sent_wakes = 0; + qca->ibs_recv_wacks = 0; + qca->ibs_recv_slps = 0; + qca->ibs_recv_wakes = 0; + qca->vote_last_jif = jiffies; + qca->vote_on_ms = 0; + qca->vote_off_ms = 0; + qca->votes_on = 0; + qca->votes_off = 0; + qca->tx_votes_on = 0; + qca->tx_votes_off = 0; + qca->rx_votes_on = 0; + qca->rx_votes_off = 0; + + hu->priv = qca; + + init_timer(&qca->wake_retrans_timer); + qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout; + qca->wake_retrans_timer.data = (u_long)hu; + qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; + + init_timer(&qca->tx_idle_timer); + qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout; + qca->tx_idle_timer.data = (u_long)hu; + qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; + + BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", + qca->tx_idle_delay, qca->wake_retrans); + + return 0; +} + +static void qca_debugfs_init(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct dentry *ibs_dir; + umode_t mode; + + if (!hdev->debugfs) + return; + + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); + + /* read only */ + mode = S_IRUGO; + debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); + debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); + debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, + &qca->ibs_sent_slps); + debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir, + &qca->ibs_sent_wakes); + debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir, + &qca->ibs_sent_wacks); + debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir, + &qca->ibs_recv_slps); + debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir, + &qca->ibs_recv_wakes); + debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir, + &qca->ibs_recv_wacks); + debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); + debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); + debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); + debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); + debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); + debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); + debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); + debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); + debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); + debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); + + /* read/write */ + mode = S_IRUGO | S_IWUSR; + debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); + debugfs_create_u32("tx_idle_delay", mode, ibs_dir, + &qca->tx_idle_delay); +} + +/* Flush protocol data */ +static int qca_flush(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca flush", hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + + return 0; +} + +/* Close protocol */ +static int qca_close(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca close", hu); + + 
serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + del_timer(&qca->tx_idle_timer); + del_timer(&qca->wake_retrans_timer); + destroy_workqueue(qca->workqueue); + qca->hu = NULL; + + kfree_skb(qca->rx_skb); + + hu->priv = NULL; + + kfree(qca); + + return 0; +} + +/* Called upon a wake-up-indication from the device. + */ +static void device_want_to_wakeup(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to wake up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wakes++; + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_ASLEEP: + /* Make sure the clock is on - we may have turned it off since + * receiving the wake up indicator, so wake the rx clock via + * the workqueue. + */ + queue_work(qca->workqueue, &qca->ws_awake_rx); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + + case HCI_IBS_RX_AWAKE: + /* Always acknowledge device wake up, + * sending an IBS message doesn't count as TX ON. + */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wacks++; + break; + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Called upon a sleep-indication from the device. + */ +static void device_want_to_sleep(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to sleep", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_slps++; + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_AWAKE: + /* Update state */ + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + /* Vote off rx clock under workqueue */ + queue_work(qca->workqueue, &qca->ws_rx_vote_off); + break; + + case HCI_IBS_RX_ASLEEP: + /* Fall through */ + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +/* Called upon wake-up-acknowledgement from the device. + */ +static void device_woke_up(struct hci_uart *hu) +{ + unsigned long flags, idle_delay; + struct qca_data *qca = hu->priv; + struct sk_buff *skb = NULL; + + BT_DBG("hu %p woke up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wacks++; + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* Expect one if we send 2 WAKEs */ + BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + + case HCI_IBS_TX_WAKING: + /* Send pending packets */ + while ((skb = skb_dequeue(&qca->tx_wait_q))) + skb_queue_tail(&qca->txq, skb); + + /* Switch timers and change state to HCI_IBS_TX_AWAKE */ + del_timer(&qca->wake_retrans_timer); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + qca->tx_ibs_state = HCI_IBS_TX_AWAKE; + break; + + case HCI_IBS_TX_ASLEEP: + /* Fall through */ + + default: + BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Enqueue frame for transmission (padding, crc, etc); may be called from + * two simultaneous tasklets. + */
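qca_enqueue() below drives a three-state transmit machine: packets go straight to txq while AWAKE, are parked on tx_wait_q while ASLEEP or WAKING, and device_woke_up() above flushes the wait queue once the WAKE_ACK arrives. A compact userspace sketch of those transitions (illustration only, not part of the patch; names are invented):

  #include <stdio.h>

  enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

  static const char *on_enqueue(enum tx_state *s)
  {
  	switch (*s) {
  	case TX_AWAKE:
  		return "send now, re-arm idle timer";
  	case TX_ASLEEP:
  		*s = TX_WAKING;	/* defer packet, ask the device to wake */
  		return "queue on wait list, send WAKE_IND";
  	case TX_WAKING:
  		return "queue on wait list, wake already in flight";
  	}
  	return "?";
  }

  int main(void)
  {
  	enum tx_state s = TX_ASLEEP;
  	printf("%s\n", on_enqueue(&s));	/* queue..., send WAKE_IND */
  	printf("%s\n", on_enqueue(&s));	/* queue..., wake already in flight */
  	s = TX_AWAKE;			/* as if a WAKE_ACK had arrived */
  	printf("%s\n", on_enqueue(&s));	/* send now */
  	return 0;
  }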
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + unsigned long flags = 0, idle_delay; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, + qca->tx_ibs_state); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + /* Don't go to sleep in the middle of a patch download, or when + * Out-Of-Band (GPIO control) sleep is selected. + */ + if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) { + skb_queue_tail(&qca->txq, skb); + return 0; + } + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + /* Act according to current state */ + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + BT_DBG("Device awake, sending normally"); + skb_queue_tail(&qca->txq, skb); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + break; + + case HCI_IBS_TX_ASLEEP: + BT_DBG("Device asleep, waking up and queueing packet"); + /* Save packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + + qca->tx_ibs_state = HCI_IBS_TX_WAKING; + /* Schedule a work queue to wake up device */ + queue_work(qca->workqueue, &qca->ws_awake_device); + break; + + case HCI_IBS_TX_WAKING: + BT_DBG("Device waking up, queueing packet"); + /* Transient state; just keep packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + break; + + default: + BT_ERR("Illegal tx state: %d (losing packet)", + qca->tx_ibs_state); + kfree_skb(skb); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + return 0; +} + +static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND); + + device_want_to_sleep(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND); + + device_want_to_wakeup(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK); + + device_woke_up(hu); + + kfree_skb(skb); + return 0; +} + +#define QCA_IBS_SLEEP_IND_EVENT \ + .type = HCI_IBS_SLEEP_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_IND_EVENT \ + .type = HCI_IBS_WAKE_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_ACK_EVENT \ + .type = HCI_IBS_WAKE_ACK, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +static const struct h4_recv_pkt qca_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind }, + { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack }, + { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind }, +}; + +static int qca_recv(struct hci_uart *hu, const void *data, int count) +{ + struct qca_data *qca = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, + qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); + if (IS_ERR(qca->rx_skb)) { + int err = PTR_ERR(qca->rx_skb); + BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); + qca->rx_skb = NULL; + 
return err; + } + + return count; +} + +static struct sk_buff *qca_dequeue(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + return skb_dequeue(&qca->txq); +} + +static uint8_t qca_get_baudrate_value(int speed) +{ + switch (speed) { + case 9600: + return QCA_BAUDRATE_9600; + case 19200: + return QCA_BAUDRATE_19200; + case 38400: + return QCA_BAUDRATE_38400; + case 57600: + return QCA_BAUDRATE_57600; + case 115200: + return QCA_BAUDRATE_115200; + case 230400: + return QCA_BAUDRATE_230400; + case 460800: + return QCA_BAUDRATE_460800; + case 500000: + return QCA_BAUDRATE_500000; + case 921600: + return QCA_BAUDRATE_921600; + case 1000000: + return QCA_BAUDRATE_1000000; + case 2000000: + return QCA_BAUDRATE_2000000; + case 3000000: + return QCA_BAUDRATE_3000000; + case 3500000: + return QCA_BAUDRATE_3500000; + default: + return QCA_BAUDRATE_115200; + } +} + +static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct sk_buff *skb; + u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; + + if (baudrate > QCA_BAUDRATE_3000000) + return -EINVAL; + + cmd[4] = baudrate; + + skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC); + if (!skb) { + BT_ERR("Failed to allocate memory for baudrate packet"); + return -ENOMEM; + } + + /* Assign commands to change baudrate and packet type. */ + memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); + bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + + skb_queue_tail(&qca->txq, skb); + hci_uart_tx_wakeup(hu); + + /* Wait 300 ms for the controller to change to the new baudrate; + * the controller will come back after it receives this HCI command, + * and then the host can communicate with it at the new baudrate. + */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); + set_current_state(TASK_INTERRUPTIBLE); + + return 0; +} + +static int qca_setup(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + struct qca_data *qca = hu->priv; + unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; + int ret; + + BT_INFO("%s: ROME setup", hdev->name); + + /* Patch downloading has to be done without IBS mode */ + clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); + + /* Setup initial baudrate */ + speed = 0; + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + + if (speed) + hci_uart_set_baudrate(hu, speed); + + /* Setup user speed if needed */ + speed = 0; + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + + if (speed) { + qca_baudrate = qca_get_baudrate_value(speed); + + BT_INFO("%s: Set UART speed to %d", hdev->name, speed); + ret = qca_set_baudrate(hdev, qca_baudrate); + if (ret) { + BT_ERR("%s: Failed to change the baud rate (%d)", + hdev->name, ret); + return ret; + } + hci_uart_set_baudrate(hu, speed); + } + + /* Setup patch / NVM configurations */ + ret = qca_uart_setup_rome(hdev, qca_baudrate); + if (!ret) { + set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); + qca_debugfs_init(hdev); + } + + /* Setup bdaddr */ + hu->hdev->set_bdaddr = qca_set_bdaddr_rome; + + return ret; +} + +static struct hci_uart_proto qca_proto = { + .id = HCI_UART_QCA, + .name = "QCA", + .init_speed = 115200, + .oper_speed = 3000000, + .open = qca_open, + .close = qca_close, + .flush = qca_flush, + .setup = qca_setup, + .recv = qca_recv, + .enqueue = qca_enqueue, + .dequeue = qca_dequeue, +}; + +int __init qca_init(void) +{ + 
return hci_uart_register_proto(&qca_proto); +} + +int __exit qca_deinit(void) +{ + return hci_uart_unregister_proto(&qca_proto); +} diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index ce9c67095..495b9ef52 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -35,7 +35,7 @@ #define HCIUARTGETFLAGS _IOR('U', 204, int) /* UART protocols */ -#define HCI_UART_MAX_PROTO 8 +#define HCI_UART_MAX_PROTO 9 #define HCI_UART_H4 0 #define HCI_UART_BCSP 1 @@ -45,6 +45,7 @@ #define HCI_UART_ATH3K 5 #define HCI_UART_INTEL 6 #define HCI_UART_BCM 7 +#define HCI_UART_QCA 8 #define HCI_UART_RAW_DEVICE 0 #define HCI_UART_RESET_ON_INIT 1 @@ -167,7 +168,17 @@ int h5_init(void); int h5_deinit(void); #endif +#ifdef CONFIG_BT_HCIUART_INTEL +int intel_init(void); +int intel_deinit(void); +#endif + #ifdef CONFIG_BT_HCIUART_BCM int bcm_init(void); int bcm_deinit(void); #endif + +#ifdef CONFIG_BT_HCIUART_QCA +int qca_init(void); +int qca_deinit(void); +#endif diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17..0ebca8ba7 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL config ARM_CCI500_PMU bool "ARM CCI500 PMU support" - default y depends on (ARM && CPU_V7) || ARM64 depends on PERF_EVENTS select ARM_CCI_PMU diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 7d9879e16..7082c7268 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) break; target = cpumask_any_but(cpu_online_mask, cpu); - if (target < 0) + if (target >= nr_cpu_ids) break; perf_pmu_migrate_context(&dt->pmu, cpu, target); cpumask_set_cpu(target, &dt->cpu); - WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); + if (ccn->irq) + WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); default: break; } diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index ab3bde16e..1c543effe 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -331,6 +331,18 @@ static phys_addr_t mips_cdmm_cur_base(void) << MIPS_CDMMBASE_ADDR_START; } +/** + * mips_cdmm_phys_base() - Choose a physical base address for CDMM region. + * + * Picking a suitable physical address at which to map the CDMM region is + * platform specific, so this weak function can be overridden by platform + * code to pick a suitable value if none is configured by the bootloader. + */ +phys_addr_t __weak mips_cdmm_phys_base(void) +{ + return 0; +} + /** * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable. * @bus: Pointer to bus information for current CPU. 
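The weak mips_cdmm_phys_base() default above is why the hunk below can drop the old NULL test: a weak function definition, unlike an optional extern symbol, always exists and is always callable, and a platform that links in a strong definition of the same name replaces it at link time. A standalone GCC/Clang sketch of the mechanism (illustration only, not part of the patch; the symbol name and value are invented):

  #include <stdio.h>

  /* Weak default, as with mips_cdmm_phys_base(): used only if no other
   * object file in the link provides a strong definition. */
  __attribute__((weak)) unsigned long phys_base(void)
  {
  	return 0;	/* nothing configured */
  }

  int main(void)
  {
  	/* With no override linked in, the weak default is called. */
  	printf("base: 0x%lx\n", phys_base());
  	return 0;
  }

Linking another object that defines a non-weak phys_base() would silently take over, which is exactly how platform code overrides the bus driver's default.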
@@ -368,7 +380,7 @@ static int mips_cdmm_setup(struct mips_cdmm_bus *bus) if (!bus->phys) bus->phys = mips_cdmm_cur_base(); /* Otherwise, ask platform code for suggestions */ - if (!bus->phys && mips_cdmm_phys_base) + if (!bus->phys) bus->phys = mips_cdmm_phys_base(); /* Otherwise, copy what other CPUs have done */ if (!bus->phys) diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index a64763b6b..6575c0fe6 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -107,7 +107,7 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev) if (!res) return ERR_PTR(-ENOMEM); - regmap = bridge->ops->regmap_init(dev, bridge->context); + regmap = (bridge->ops->regmap_init)(dev, bridge->context); if (IS_ERR(regmap)) { devres_free(res); return regmap; } diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c6dea3f69..1341a94cc 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, } EXPORT_SYMBOL(intel_gmch_probe); -void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, - phys_addr_t *mappable_base, unsigned long *mappable_end) +void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, + phys_addr_t *mappable_base, u64 *mappable_end) { *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; *stolen_size = intel_private.stolen_size; diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index c37cf754a..3c7764540 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev) if (IS_ERR(ctx->csr_base)) return PTR_ERR(ctx->csr_base); - ctx->irq = platform_get_irq(pdev, 0); - if (ctx->irq < 0) { + rc = platform_get_irq(pdev, 0); + if (rc < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - return ctx->irq; + return rc; } + ctx->irq = rc; dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", ctx->csr_base, ctx->irq); diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index 61e716166..feafdab73 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -694,7 +694,7 @@ static int bt_size(void) return sizeof(struct si_sm_data); } -struct si_sm_handlers bt_smi_handlers = { +const struct si_sm_handlers bt_smi_handlers = { .init_data = bt_init_data, .start_transaction = bt_start_transaction, .get_result = bt_get_result, diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index 8c25f5968..1da61af7f 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -540,7 +540,7 @@ static void kcs_cleanup(struct si_sm_data *kcs) { } -struct si_sm_handlers kcs_smi_handlers = { +const struct si_sm_handlers kcs_smi_handlers = { .init_data = init_kcs_data, .start_transaction = start_kcs_transaction, .get_result = get_kcs_result, diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index bf75f6361..e3536da05 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -342,7 +342,7 @@ struct ipmi_smi { * an unpreemptible region to use this. You must fetch the * value into a local variable and make sure it is not NULL.
*/ - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; void *send_info; #ifdef CONFIG_PROC_FS @@ -744,7 +744,13 @@ static void deliver_response(struct ipmi_recv_msg *msg) ipmi_inc_stat(intf, unhandled_local_responses); } ipmi_free_recv_msg(msg); - } else { + } else if (!oops_in_progress) { + /* + * If we are running in the panic context, calling the + * receive handler doesn't have much meaning and has a deadlock + * risk. For now, simply skip it in that case. + */ + ipmi_user_t user = msg->user; user->handler->ipmi_recv_hndl(msg, user->handler_data); } @@ -1015,7 +1021,7 @@ int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) { int rv = 0; ipmi_smi_t intf; - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; mutex_lock(&ipmi_interfaces_mutex); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { @@ -1501,7 +1507,7 @@ static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, } -static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, +static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { int run_to_completion = intf->run_to_completion; @@ -2747,7 +2753,7 @@ void ipmi_poll_interface(ipmi_user_t user) } EXPORT_SYMBOL(ipmi_poll_interface); -int ipmi_register_smi(struct ipmi_smi_handlers *handlers, +int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *si_dev, @@ -3959,6 +3965,10 @@ free_msg: if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + /* + * We can get an asynchronous event or receive message in addition + * to commands we send. + */ if (msg == intf->curr_msg) intf->curr_msg = NULL; if (!run_to_completion) @@ -4015,7 +4025,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, unsigned int *waiting_msgs) { struct ipmi_recv_msg *msg; - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; if (intf->in_shutdown) return; @@ -4082,7 +4092,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, ipmi_inc_stat(intf, retransmitted_ipmb_commands); - smi_send(intf, intf->handlers, smi_msg, 0); + smi_send(intf, handlers, smi_msg, 0); } else ipmi_free_smi_msg(smi_msg); @@ -4291,6 +4301,9 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf, 0, 1); /* Don't retry, and don't wait. */ if (rv) atomic_sub(2, &panic_done_count); + else if (intf->handlers->flush_messages) + intf->handlers->flush_messages(intf->send_info); + while (atomic_read(&panic_done_count) != 0) ipmi_poll(intf); } @@ -4364,9 +4377,7 @@ static void send_panic_events(char *str) /* Interface is not ready. */ continue; - intf->run_to_completion = 1; /* Send the event announcing the panic. */ - intf->handlers->set_run_to_completion(intf->send_info, 1); ipmi_panic_request_and_wait(intf, &addr, &msg); } @@ -4506,6 +4517,23 @@ static int panic_event(struct notifier_block *this, /* Interface is not ready. */ continue; + /* + * If we were interrupted while locking xmit_msgs_lock or + * waiting_rcv_msgs_lock, the corresponding list may be + * corrupted. In this case, drop items on the list for + * safety. + */
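The panic-path code that follows applies a trylock-or-reinitialise rule: if a message list's lock cannot be taken from the panic context, the holder was interrupted mid-update, so the list is assumed corrupt and reset rather than walked. A userspace sketch of the same rule, with a pthread mutex standing in for the spinlock (illustration only, not part of the patch; build with -lpthread):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static int list_len = 3;	/* pretend three queued messages */

  static void panic_path(void)
  {
  	if (pthread_mutex_trylock(&lock) != 0) {
  		list_len = 0;	/* holder was interrupted: drop the items */
  		printf("lock busy: list reset\n");
  	} else {
  		pthread_mutex_unlock(&lock);	/* list is consistent */
  		printf("lock free: %d messages kept\n", list_len);
  	}
  }

  int main(void)
  {
  	panic_path();			/* lock free here */
  	pthread_mutex_lock(&lock);	/* simulate an interrupted holder */
  	panic_path();			/* takes the reset branch */
  	pthread_mutex_unlock(&lock);
  	return 0;
  }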
+ if (!spin_trylock(&intf->xmit_msgs_lock)) { + INIT_LIST_HEAD(&intf->xmit_msgs); + INIT_LIST_HEAD(&intf->hp_xmit_msgs); + } else + spin_unlock(&intf->xmit_msgs_lock); + + if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) + INIT_LIST_HEAD(&intf->waiting_rcv_msgs); + else + spin_unlock(&intf->waiting_rcv_msgs_lock); + intf->run_to_completion = 1; intf->handlers->set_run_to_completion(intf->send_info, 1); } diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 9b409c0f1..6e658aa11 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -143,8 +143,15 @@ static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) pr_devel("%s: -> %d (size %lld)\n", __func__, rc, rc == 0 ? size : 0); if (rc) { + /* If we came via the poll and the response was not yet ready */ + if (rc == OPAL_EMPTY) { + spin_unlock_irqrestore(&smi->msg_lock, flags); + return 0; + } + + smi->cur_msg = NULL; spin_unlock_irqrestore(&smi->msg_lock, flags); - ipmi_free_smi_msg(msg); + send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED); return 0; } @@ -300,7 +307,6 @@ static const struct of_device_id ipmi_powernv_match[] = { static struct platform_driver powernv_ipmi_driver = { .driver = { .name = "ipmi-powernv", - .owner = THIS_MODULE, .of_match_table = ipmi_powernv_match, }, .probe = ipmi_powernv_probe, diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 8a45e92ff..654f6f36a 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -64,7 +64,6 @@ #include <linux/dmi.h> #include <linux/string.h> #include <linux/ctype.h> -#include <linux/pnp.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_address.h> @@ -164,7 +163,7 @@ struct smi_info { int intf_num; ipmi_smi_t intf; struct si_sm_data *si_sm; - struct si_sm_handlers *handlers; + const struct si_sm_handlers *handlers; enum si_type si_type; spinlock_t si_lock; struct ipmi_smi_msg *waiting_msg; @@ -263,9 +262,21 @@ struct smi_info { bool supports_event_msg_buff; /* - * Can we clear the global enables receive irq bit? + * Can we disable the global enables receive irq + * bit? There are currently two forms of brokenness: some + * systems cannot disable the bit (which is technically within + * the spec but a bad idea) and some systems have the bit + * forced to zero even though interrupts work (which is + * clearly outside the spec). The next bool tells which form + * of brokenness is present. */ - bool cannot_clear_recv_irq_bit; + bool cannot_disable_irq; + + /* + * Some systems are broken and cannot set the irq enable + * bit, even if they support interrupts. + */ + bool irq_enable_broken; /* * Did we get an attention that we did not handle?
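The two new flags feed a single predicate in current_global_enables() (a later hunk): the receive-message interrupt is requested when the irq actually works or the BMC cannot clear the bit anyway, and never when the BMC cannot set the bit. A truth-table sketch of that rule (illustration only, not part of the patch; the helper name is invented):

  #include <stdbool.h>
  #include <stdio.h>

  /* irq_working stands for (smi_info->irq && !smi_info->interrupt_disabled) */
  static bool rcv_msg_intr(bool irq_working, bool cannot_disable_irq,
  			 bool irq_enable_broken)
  {
  	return (irq_working || cannot_disable_irq) && !irq_enable_broken;
  }

  int main(void)
  {
  	/* sane BMC with a working irq: request the bit */
  	printf("%d\n", rcv_msg_intr(true, false, false));	/* 1 */
  	/* BMC that cannot clear the bit: keep it set even without an irq */
  	printf("%d\n", rcv_msg_intr(false, true, false));	/* 1 */
  	/* BMC that cannot set the bit: never request it */
  	printf("%d\n", rcv_msg_intr(true, false, true));	/* 0 */
  	return 0;
  }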
@@ -309,9 +320,6 @@ static int num_force_kipmid; #ifdef CONFIG_PCI static bool pci_registered; #endif -#ifdef CONFIG_ACPI -static bool pnp_registered; -#endif #ifdef CONFIG_PARISC static bool parisc_registered; #endif @@ -558,13 +566,14 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base, if (smi_info->supports_event_msg_buff) enables |= IPMI_BMC_EVT_MSG_BUFF; - if ((smi_info->irq && !smi_info->interrupt_disabled) || - smi_info->cannot_clear_recv_irq_bit) + if (((smi_info->irq && !smi_info->interrupt_disabled) || + smi_info->cannot_disable_irq) && + !smi_info->irq_enable_broken) enables |= IPMI_BMC_RCV_MSG_INTR; if (smi_info->supports_event_msg_buff && - smi_info->irq && !smi_info->interrupt_disabled) - + smi_info->irq && !smi_info->interrupt_disabled && + !smi_info->irq_enable_broken) enables |= IPMI_BMC_EVT_MSG_INTR; *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); @@ -928,33 +937,36 @@ static void check_start_timer_thread(struct smi_info *smi_info) } } +static void flush_messages(void *send_info) +{ + struct smi_info *smi_info = send_info; + enum si_sm_result result; + + /* + * Currently, this function is called only in run-to-completion + * mode. This means we are single-threaded, no need for locks. + */ + result = smi_event_handler(smi_info, 0); + while (result != SI_SM_IDLE) { + udelay(SI_SHORT_TIMEOUT_USEC); + result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); + } +} + static void sender(void *send_info, struct ipmi_smi_msg *msg) { struct smi_info *smi_info = send_info; - enum si_sm_result result; unsigned long flags; debug_timestamp("Enqueue"); if (smi_info->run_to_completion) { /* - * If we are running to completion, start it and run - * transactions until everything is clear. + * If we are running to completion, start it. Upper + * layer will call flush_messages to clear it out. */ smi_info->waiting_msg = msg; - - /* - * Run to completion means we are single-threaded, no - * need for locks. 
- */ - - result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { - udelay(SI_SHORT_TIMEOUT_USEC); - result = smi_event_handler(smi_info, - SI_SHORT_TIMEOUT_USEC); - } return; } @@ -975,17 +987,10 @@ static void sender(void *send_info, static void set_run_to_completion(void *send_info, bool i_run_to_completion) { struct smi_info *smi_info = send_info; - enum si_sm_result result; smi_info->run_to_completion = i_run_to_completion; - if (i_run_to_completion) { - result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { - udelay(SI_SHORT_TIMEOUT_USEC); - result = smi_event_handler(smi_info, - SI_SHORT_TIMEOUT_USEC); - } - } + if (i_run_to_completion) + flush_messages(smi_info); } /* @@ -1258,7 +1263,7 @@ static void set_maintenance_mode(void *send_info, bool enable) atomic_set(&smi_info->req_events, 0); } -static struct ipmi_smi_handlers handlers = { +static const struct ipmi_smi_handlers handlers = { .owner = THIS_MODULE, .start_processing = smi_start_processing, .get_smi_info = get_smi_info, @@ -1267,6 +1272,7 @@ static struct ipmi_smi_handlers handlers = { .set_need_watch = set_need_watch, .set_maintenance_mode = set_maintenance_mode, .set_run_to_completion = set_run_to_completion, + .flush_messages = flush_messages, .poll = poll, }; @@ -1283,14 +1289,14 @@ static int smi_num; /* Used to sequence the SMIs */ #define DEFAULT_REGSIZE 1 #ifdef CONFIG_ACPI -static bool si_tryacpi = 1; +static bool si_tryacpi = true; #endif #ifdef CONFIG_DMI -static bool si_trydmi = 1; +static bool si_trydmi = true; #endif -static bool si_tryplatform = 1; +static bool si_tryplatform = true; #ifdef CONFIG_PCI -static bool si_trypci = 1; +static bool si_trypci = true; #endif static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS); static char *si_type[SI_MAX_PARMS]; @@ -1446,14 +1452,14 @@ static int std_irq_setup(struct smi_info *info) return rv; } -static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return inb(addr + (offset * io->regspacing)); } -static void port_outb(struct si_sm_io *io, unsigned int offset, +static void port_outb(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1461,14 +1467,14 @@ static void port_outb(struct si_sm_io *io, unsigned int offset, outb(b, addr + (offset * io->regspacing)); } -static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } -static void port_outw(struct si_sm_io *io, unsigned int offset, +static void port_outw(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1476,14 +1482,14 @@ static void port_outw(struct si_sm_io *io, unsigned int offset, outw(b << io->regshift, addr + (offset * io->regspacing)); } -static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } -static void port_outl(struct si_sm_io *io, unsigned int offset, +static void port_outl(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1556,49 +1562,52 @@ static int 
port_setup(struct smi_info *info) return 0; } -static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inb(const struct si_sm_io *io, + unsigned int offset) { return readb((io->addr)+(offset * io->regspacing)); } -static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char b) { writeb(b, (io->addr)+(offset * io->regspacing)); } -static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inw(const struct si_sm_io *io, + unsigned int offset) { return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, + unsigned char b) { writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); } -static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inl(const struct si_sm_io *io, + unsigned int offset) { return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, + unsigned char b) { writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); } #ifdef readq -static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) +static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void mem_outq(struct si_sm_io *io, unsigned int offset, +static void mem_outq(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); @@ -2233,134 +2242,6 @@ static void spmi_find_bmc(void) try_init_spmi(spmi); } } - -static int ipmi_pnp_probe(struct pnp_dev *dev, - const struct pnp_device_id *dev_id) -{ - struct acpi_device *acpi_dev; - struct smi_info *info; - struct resource *res, *res_second; - acpi_handle handle; - acpi_status status; - unsigned long long tmp; - int rv = -EINVAL; - - acpi_dev = pnp_acpi_device(dev); - if (!acpi_dev) - return -ENODEV; - - info = smi_info_alloc(); - if (!info) - return -ENOMEM; - - info->addr_source = SI_ACPI; - printk(KERN_INFO PFX "probing via ACPI\n"); - - handle = acpi_dev->handle; - info->addr_info.acpi_info.acpi_handle = handle; - - /* _IFT tells us the interface type: KCS, BT, etc */ - status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); - if (ACPI_FAILURE(status)) { - dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); - goto err_free; - } - - switch (tmp) { - case 1: - info->si_type = SI_KCS; - break; - case 2: - info->si_type = SI_SMIC; - break; - case 3: - info->si_type = SI_BT; - break; - case 4: /* SSIF, just ignore */ - rv = -ENODEV; - goto err_free; - default: - dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); - goto err_free; - } - - res = pnp_get_resource(dev, IORESOURCE_IO, 0); - if (res) { - info->io_setup = port_setup; - info->io.addr_type = IPMI_IO_ADDR_SPACE; - } else { - res = pnp_get_resource(dev, IORESOURCE_MEM, 0); - if (res) { - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - } - } - if (!res) { - dev_err(&dev->dev, "no I/O or memory address\n"); - goto err_free; - } - info->io.addr_data = 
res->start; - - info->io.regspacing = DEFAULT_REGSPACING; - res_second = pnp_get_resource(dev, - (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? - IORESOURCE_IO : IORESOURCE_MEM, - 1); - if (res_second) { - if (res_second->start > info->io.addr_data) - info->io.regspacing = res_second->start - info->io.addr_data; - } - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = 0; - - /* If _GPE exists, use it; otherwise use standard interrupts */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_SUCCESS(status)) { - info->irq = tmp; - info->irq_setup = acpi_gpe_irq_setup; - } else if (pnp_irq_valid(dev, 0)) { - info->irq = pnp_irq(dev, 0); - info->irq_setup = std_irq_setup; - } - - info->dev = &dev->dev; - pnp_set_drvdata(dev, info); - - dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", - res, info->io.regsize, info->io.regspacing, - info->irq); - - rv = add_smi(info); - if (rv) - kfree(info); - - return rv; - -err_free: - kfree(info); - return rv; -} - -static void ipmi_pnp_remove(struct pnp_dev *dev) -{ - struct smi_info *info = pnp_get_drvdata(dev); - - cleanup_one_si(info); -} - -static const struct pnp_device_id pnp_dev_table[] = { - {"IPI0001", 0}, - {"", 0}, -}; - -static struct pnp_driver ipmi_pnp_driver = { - .name = DEVICE_NAME, - .probe = ipmi_pnp_probe, - .remove = ipmi_pnp_remove, - .id_table = pnp_dev_table, -}; - -MODULE_DEVICE_TABLE(pnp, pnp_dev_table); #endif #ifdef CONFIG_DMI @@ -2654,7 +2535,7 @@ static void ipmi_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct pci_device_id ipmi_pci_devices[] = { +static const struct pci_device_id ipmi_pci_devices[] = { { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, { 0, } @@ -2669,10 +2550,19 @@ static struct pci_driver ipmi_pci_driver = { }; #endif /* CONFIG_PCI */ -static const struct of_device_id ipmi_match[]; -static int ipmi_probe(struct platform_device *dev) -{ #ifdef CONFIG_OF +static const struct of_device_id of_ipmi_match[] = { + { .type = "ipmi", .compatible = "ipmi-kcs", + .data = (void *)(unsigned long) SI_KCS }, + { .type = "ipmi", .compatible = "ipmi-smic", + .data = (void *)(unsigned long) SI_SMIC }, + { .type = "ipmi", .compatible = "ipmi-bt", + .data = (void *)(unsigned long) SI_BT }, + {}, +}; + +static int of_ipmi_probe(struct platform_device *dev) +{ const struct of_device_id *match; struct smi_info *info; struct resource resource; @@ -2683,9 +2573,9 @@ static int ipmi_probe(struct platform_device *dev) dev_info(&dev->dev, "probing via device tree\n"); - match = of_match_device(ipmi_match, &dev->dev); + match = of_match_device(of_ipmi_match, &dev->dev); if (!match) - return -EINVAL; + return -ENODEV; if (!of_device_is_available(np)) return -EINVAL; @@ -2754,33 +2644,160 @@ static int ipmi_probe(struct platform_device *dev) kfree(info); return ret; } -#endif return 0; } +MODULE_DEVICE_TABLE(of, of_ipmi_match); +#else +#define of_ipmi_match NULL +static int of_ipmi_probe(struct platform_device *dev) +{ + return -ENODEV; +} +#endif -static int ipmi_remove(struct platform_device *dev) +#ifdef CONFIG_ACPI +static int acpi_ipmi_probe(struct platform_device *dev) { -#ifdef CONFIG_OF - cleanup_one_si(dev_get_drvdata(&dev->dev)); + struct smi_info *info; + struct resource *res, *res_second; + acpi_handle handle; + acpi_status status; + unsigned long long tmp; + int rv = -EINVAL; + + handle = ACPI_HANDLE(&dev->dev); + if (!handle) + return -ENODEV; + + info = smi_info_alloc(); + if (!info) + return 
-ENOMEM; + + info->addr_source = SI_ACPI; + dev_info(&dev->dev, PFX "probing via ACPI\n"); + + info->addr_info.acpi_info.acpi_handle = handle; + + /* _IFT tells us the interface type: KCS, BT, etc */ + status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); + if (ACPI_FAILURE(status)) { + dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); + goto err_free; + } + + switch (tmp) { + case 1: + info->si_type = SI_KCS; + break; + case 2: + info->si_type = SI_SMIC; + break; + case 3: + info->si_type = SI_BT; + break; + case 4: /* SSIF, just ignore */ + rv = -ENODEV; + goto err_free; + default: + dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); + goto err_free; + } + + res = platform_get_resource(dev, IORESOURCE_IO, 0); + if (res) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + } + if (!res) { + dev_err(&dev->dev, "no I/O or memory address\n"); + goto err_free; + } + info->io.addr_data = res->start; + + info->io.regspacing = DEFAULT_REGSPACING; + res_second = platform_get_resource(dev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? + IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res_second) { + if (res_second->start > info->io.addr_data) + info->io.regspacing = + res_second->start - info->io.addr_data; + } + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + /* If _GPE exists, use it; otherwise use standard interrupts */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) { + info->irq = tmp; + info->irq_setup = acpi_gpe_irq_setup; + } else { + int irq = platform_get_irq(dev, 0); + + if (irq > 0) { + info->irq = irq; + info->irq_setup = std_irq_setup; + } + } + + info->dev = &dev->dev; + platform_set_drvdata(dev, info); + + dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", + res, info->io.regsize, info->io.regspacing, + info->irq); + + rv = add_smi(info); + if (rv) + kfree(info); + + return rv; + +err_free: + kfree(info); + return rv; +} + +static const struct acpi_device_id acpi_ipmi_match[] = { + { "IPI0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match); +#else +static int acpi_ipmi_probe(struct platform_device *dev) +{ + return -ENODEV; +} #endif - return 0; + +static int ipmi_probe(struct platform_device *dev) +{ + if (of_ipmi_probe(dev) == 0) + return 0; + + return acpi_ipmi_probe(dev); } -static const struct of_device_id ipmi_match[] = +static int ipmi_remove(struct platform_device *dev) { - { .type = "ipmi", .compatible = "ipmi-kcs", - .data = (void *)(unsigned long) SI_KCS }, - { .type = "ipmi", .compatible = "ipmi-smic", - .data = (void *)(unsigned long) SI_SMIC }, - { .type = "ipmi", .compatible = "ipmi-bt", - .data = (void *)(unsigned long) SI_BT }, - {}, -}; + struct smi_info *info = dev_get_drvdata(&dev->dev); + + cleanup_one_si(info); + return 0; +} static struct platform_driver ipmi_driver = { .driver = { .name = DEVICE_NAME, - .of_match_table = ipmi_match, + .of_match_table = of_ipmi_match, + .acpi_match_table = ACPI_PTR(acpi_ipmi_match), }, .probe = ipmi_probe, .remove = ipmi_remove, @@ -2905,12 +2922,7 @@ static int try_get_dev_id(struct smi_info *smi_info) return rv; } -/* - * Some BMCs do not support clearing the receive irq bit in the global - * enables (even if they don't support interrupts on the BMC). Check - * for this and handle it properly. 
- */ -static void check_clr_rcv_irq(struct smi_info *smi_info) +static int get_global_enables(struct smi_info *smi_info, u8 *enables) { unsigned char msg[3]; unsigned char *resp; @@ -2918,12 +2930,8 @@ static void check_clr_rcv_irq(struct smi_info *smi_info) int rv; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); - if (!resp) { - printk(KERN_WARNING PFX "Out of memory allocating response for" - " global enables command, cannot check recv irq bit" - " handling.\n"); - return; - } + if (!resp) + return -ENOMEM; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; @@ -2931,9 +2939,9 @@ static void check_clr_rcv_irq(struct smi_info *smi_info) rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING PFX "Error getting response from get" - " global enables command, cannot check recv irq bit" - " handling.\n"); + dev_warn(smi_info->dev, + "Error getting response from get global enables command: %d\n", + rv); goto out; } @@ -2944,27 +2952,44 @@ static void check_clr_rcv_irq(struct smi_info *smi_info) resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || resp[2] != 0) { - printk(KERN_WARNING PFX "Invalid return from get global" - " enables command, cannot check recv irq bit" - " handling.\n"); + dev_warn(smi_info->dev, + "Invalid return from get global enables command: %ld %x %x %x\n", + resp_len, resp[0], resp[1], resp[2]); rv = -EINVAL; goto out; + } else { + *enables = resp[3]; } - if ((resp[3] & IPMI_BMC_RCV_MSG_INTR) == 0) - /* Already clear, should work ok. */ - goto out; +out: + kfree(resp); + return rv; +} + +/* + * Returns 1 if it gets an error from the command. + */ +static int set_global_enables(struct smi_info *smi_info, u8 enables) +{ + unsigned char msg[3]; + unsigned char *resp; + unsigned long resp_len; + int rv; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = resp[3] & ~IPMI_BMC_RCV_MSG_INTR; + msg[2] = enables; smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING PFX "Error getting response from set" - " global enables command, cannot check recv irq bit" - " handling.\n"); + dev_warn(smi_info->dev, + "Error getting response from set global enables command: %d\n", + rv); goto out; } @@ -2974,25 +2999,93 @@ static void check_clr_rcv_irq(struct smi_info *smi_info) if (resp_len < 3 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { - printk(KERN_WARNING PFX "Invalid return from get global" - " enables command, cannot check recv irq bit" - " handling.\n"); + dev_warn(smi_info->dev, + "Invalid return from set global enables command: %ld %x %x\n", + resp_len, resp[0], resp[1]); rv = -EINVAL; goto out; } - if (resp[2] != 0) { + if (resp[2] != 0) + rv = 1; + +out: + kfree(resp); + return rv; +} + +/* + * Some BMCs do not support clearing the receive irq bit in the global + * enables (even if they don't support interrupts on the BMC). Check + * for this and handle it properly. + */ +static void check_clr_rcv_irq(struct smi_info *smi_info) +{ + u8 enables = 0; + int rv; + + rv = get_global_enables(smi_info, &enables); + if (!rv) { + if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0) + /* Already clear, should work ok. 
*/ + return; + + enables &= ~IPMI_BMC_RCV_MSG_INTR; + rv = set_global_enables(smi_info, enables); + } + + if (rv < 0) { + dev_err(smi_info->dev, + "Cannot check clearing the rcv irq: %d\n", rv); + return; + } + + if (rv) { /* * An error when setting the event buffer bit means * clearing the bit is not supported. */ - printk(KERN_WARNING PFX "The BMC does not support clearing" - " the recv irq bit, compensating, but the BMC needs to" - " be fixed.\n"); - smi_info->cannot_clear_recv_irq_bit = true; + dev_warn(smi_info->dev, + "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n"); + smi_info->cannot_disable_irq = true; + } +} + +/* + * Some BMCs do not support setting the interrupt bits in the global + * enables even if they support interrupts. Clearly bad, but we can + * compensate. + */ +static void check_set_rcv_irq(struct smi_info *smi_info) +{ + u8 enables = 0; + int rv; + + if (!smi_info->irq) + return; + + rv = get_global_enables(smi_info, &enables); + if (!rv) { + enables |= IPMI_BMC_RCV_MSG_INTR; + rv = set_global_enables(smi_info, enables); + } + + if (rv < 0) { + dev_err(smi_info->dev, + "Cannot check setting the rcv irq: %d\n", rv); + return; + } + + if (rv) { + /* + * An error when setting the event buffer bit means + * setting the bit is not supported. + */ + dev_warn(smi_info->dev, + "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n"); + smi_info->cannot_disable_irq = true; + smi_info->irq_enable_broken = true; } - out: - kfree(resp); } static int try_enable_event_buffer(struct smi_info *smi_info) @@ -3313,6 +3406,12 @@ static void setup_xaction_handlers(struct smi_info *smi_info) setup_dell_poweredge_bt_xaction_handler(smi_info); } +static void check_for_broken_irqs(struct smi_info *smi_info) +{ + check_clr_rcv_irq(smi_info); + check_set_rcv_irq(smi_info); +} + static inline void wait_for_timer_and_thread(struct smi_info *smi_info) { if (smi_info->thread != NULL) @@ -3321,7 +3420,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info) del_timer_sync(&smi_info->si_timer); } -static struct ipmi_default_vals +static const struct ipmi_default_vals { int type; int port; @@ -3490,10 +3589,9 @@ static int try_smi_init(struct smi_info *new_smi) goto out_err; } - check_clr_rcv_irq(new_smi); - setup_oem_data_handler(new_smi); setup_xaction_handlers(new_smi); + check_for_broken_irqs(new_smi); new_smi->waiting_msg = NULL; new_smi->curr_msg = NULL; @@ -3692,13 +3790,6 @@ static int init_ipmi_si(void) } #endif -#ifdef CONFIG_ACPI - if (si_tryacpi) { - pnp_register_driver(&ipmi_pnp_driver); - pnp_registered = true; - } -#endif - #ifdef CONFIG_DMI if (si_trydmi) dmi_find_bmc(); @@ -3850,10 +3941,6 @@ static void cleanup_ipmi_si(void) if (pci_registered) pci_unregister_driver(&ipmi_pci_driver); #endif -#ifdef CONFIG_ACPI - if (pnp_registered) - pnp_unregister_driver(&ipmi_pnp_driver); -#endif #ifdef CONFIG_PARISC if (parisc_registered) unregister_parisc_driver(&ipmi_parisc_driver); diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index df89f7347..a705027c0 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h @@ -46,8 +46,8 @@ struct si_sm_data; * this interface. 
*/ struct si_sm_io { - unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); - void (*outputb)(struct si_sm_io *io, + unsigned char (*inputb)(const struct si_sm_io *io, unsigned int offset); + void (*outputb)(const struct si_sm_io *io, unsigned int offset, unsigned char b); @@ -135,7 +135,7 @@ struct si_sm_handlers { }; /* Current state machines that we can use. */ -extern struct si_sm_handlers kcs_smi_handlers; -extern struct si_sm_handlers smic_smi_handlers; -extern struct si_sm_handlers bt_smi_handlers; +extern const struct si_sm_handlers kcs_smi_handlers; +extern const struct si_sm_handlers smic_smi_handlers; +extern const struct si_sm_handlers bt_smi_handlers; diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c index c8e77afa8..8f7c73ff5 100644 --- a/drivers/char/ipmi/ipmi_smic_sm.c +++ b/drivers/char/ipmi/ipmi_smic_sm.c @@ -589,7 +589,7 @@ static int smic_size(void) return sizeof(struct si_sm_data); } -struct si_sm_handlers smic_smi_handlers = { +const struct si_sm_handlers smic_smi_handlers = { .init_data = init_smic_data, .start_transaction = start_smic_transaction, .get_result = smic_get_result, diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 207689c44..877205d22 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1136,6 +1136,10 @@ module_param_array(slave_addrs, int, &num_slave_addrs, 0); MODULE_PARM_DESC(slave_addrs, "The default IPMB slave address for the controller."); +static bool alerts_broken; +module_param(alerts_broken, bool, 0); +MODULE_PARM_DESC(alerts_broken, "Don't enable alerts for the controller."); + /* * Bit 0 enables message debugging, bit 1 enables state debugging, and * bit 2 enables timing debugging. This is an array indexed by @@ -1154,11 +1158,11 @@ static int use_thread; module_param(use_thread, int, 0); MODULE_PARM_DESC(use_thread, "Use the thread interface."); -static bool ssif_tryacpi = 1; +static bool ssif_tryacpi = true; module_param_named(tryacpi, ssif_tryacpi, bool, 0); MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI"); -static bool ssif_trydmi = 1; +static bool ssif_trydmi = true; module_param_named(trydmi, ssif_trydmi, bool, 0); MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)"); @@ -1582,6 +1586,10 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF; } + /* Some systems don't behave well if you enable alerts. */ + if (alerts_broken) + goto found; + msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; @@ -1787,7 +1795,7 @@ skip_addr: } #ifdef CONFIG_ACPI -static struct acpi_device_id ssif_acpi_match[] = { +static const struct acpi_device_id ssif_acpi_match[] = { { "IPI0001", 0 }, { }, }; diff --git a/drivers/char/misc.c b/drivers/char/misc.c index fdb0f9b3f..8069b361b 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -243,17 +243,15 @@ int misc_register(struct miscdevice * misc) * @misc: device to unregister * * Unregister a miscellaneous device that was previously - * successfully registered with misc_register(). Success - * is indicated by a zero return, a negative errno code - * indicates an error. + * successfully registered with misc_register(). 
*/ -int misc_deregister(struct miscdevice *misc) +void misc_deregister(struct miscdevice *misc) { int i = DYNAMIC_MINORS - misc->minor - 1; if (WARN_ON(list_empty(&misc->list))) - return -EINVAL; + return; mutex_lock(&misc_mtx); list_del(&misc->list); @@ -261,7 +259,6 @@ int misc_deregister(struct miscdevice *misc) if (i < DYNAMIC_MINORS && i >= 0) clear_bit(i, misc_minors); mutex_unlock(&misc_mtx); - return 0; } EXPORT_SYMBOL(misc_register); @@ -281,10 +278,9 @@ static char *misc_devnode(struct device *dev, umode_t *mode) static int __init misc_init(void) { int err; + struct proc_dir_entry *ret; -#ifdef CONFIG_PROC_FS - proc_create("misc", 0, NULL, &misc_proc_fops); -#endif + ret = proc_create("misc", 0, NULL, &misc_proc_fops); misc_class = class_create(THIS_MODULE, "misc"); err = PTR_ERR(misc_class); if (IS_ERR(misc_class)) @@ -300,7 +296,8 @@ fail_printk: printk("unable to get major %d for misc devices\n", MISC_MAJOR); class_destroy(misc_class); fail_remove: - remove_proc_entry("misc", NULL); + if (ret) + remove_proc_entry("misc", NULL); return err; } subsys_initcall(misc_init); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 9df78e2cc..97c2d8d43 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -702,7 +702,7 @@ static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, seq_printf(seq, "%ds%s\n", nvram[10], nvram[10] < 8 ? ", no memory test" : ""); - vmode = (nvram[14] << 8) || nvram[15]; + vmode = (nvram[14] << 8) | nvram[15]; seq_printf(seq, "Video mode : %s colors, %d columns, %s %s monitor\n", colors[vmode & 7], diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index 014c9d90d..f5a45d887 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -430,7 +430,7 @@ static int tosh_probe(void) int i,major,minor,day,year,month,flag; unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 }; SMMRegisters regs; - void __iomem *bios = ioremap_cache(0xf0000, 0x10000); + void __iomem *bios = ioremap(0xf0000, 0x10000); if (!bios) return -ENOMEM; diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c index d8266bc2a..941830021 100644 --- a/drivers/char/xillybus/xillybus_pcie.c +++ b/drivers/char/xillybus/xillybus_pcie.c @@ -193,14 +193,16 @@ static int xilly_probe(struct pci_dev *pdev, } /* - * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1 - * is the right thing. But some unclever PCIe drivers report it's OK - * when the hardware drops those 64-bit PCIe packets. So trust - * nobody and use 32 bits DMA addressing in any case. + * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets, + * even when the PCIe driver claims that a 64-bit mask is OK. On the + * other hand, on some architectures, 64-bit addressing is mandatory. + * So go for the 64-bit mask only when failing is the other option. */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { endpoint->dma_using_dac = 0; + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + endpoint->dma_using_dac = 1; } else { dev_err(endpoint->dev, "Failed to set DMA mask. 
Aborting.\n"); return -ENODEV; diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index c4cf075a2..d08b3e598 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-gate.o obj-$(CONFIG_COMMON_CLK) += clk-mux.o obj-$(CONFIG_COMMON_CLK) += clk-composite.o obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o -obj-$(CONFIG_COMMON_CLK) += clk-gpio-gate.o +obj-$(CONFIG_COMMON_CLK) += clk-gpio.o ifeq ($(CONFIG_OF), y) obj-$(CONFIG_COMMON_CLK) += clk-conf.o endif diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index 27dfa965c..fd7247dea 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -614,17 +614,12 @@ void __init of_at91sam9x5_clk_main_setup(struct device_node *np, int num_parents; unsigned int irq; const char *name = np->name; - int i; num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > 2) return; - for (i = 0; i < num_parents; ++i) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, "clock-output-names", &name); diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index 5b3ded520..620ea3233 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -222,7 +222,6 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, { struct clk *clk; int num_parents; - int i; unsigned int irq; const char *parent_names[MASTER_SOURCE_MAX]; const char *name = np->name; @@ -232,11 +231,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX) return; - for (i = 0; i < num_parents; ++i) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, "clock-output-names", &name); diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index df2c1afa5..e4d7b574f 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -134,7 +134,7 @@ at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name, static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph) { - struct clk *parent; + struct clk_hw *parent; unsigned long parent_rate; int shift = 0; @@ -142,8 +142,8 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph) return; if (periph->range.max) { - parent = clk_get_parent_by_index(periph->hw.clk, 0); - parent_rate = __clk_get_rate(parent); + parent = clk_hw_get_parent_by_index(&periph->hw, 0); + parent_rate = clk_hw_get_rate(parent); if (!parent_rate) return; diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 8c86c0f78..14b270b85 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -54,46 +54,47 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, return parent_rate >> pres; } -static long clk_programmable_determine_rate(struct clk_hw *hw, - unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_hw) +static int clk_programmable_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - struct clk *parent = NULL; + struct clk_hw *parent; long best_rate = -EINVAL; unsigned long parent_rate; unsigned long tmp_rate; 
int shift; int i; - for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { - parent = clk_get_parent_by_index(hw->clk, i); + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; - parent_rate = __clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); for (shift = 0; shift < PROG_PRES_MASK; shift++) { tmp_rate = parent_rate >> shift; - if (tmp_rate <= rate) + if (tmp_rate <= req->rate) break; } - if (tmp_rate > rate) + if (tmp_rate > req->rate) continue; - if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) { + if (best_rate < 0 || + (req->rate - tmp_rate) < (req->rate - best_rate)) { best_rate = tmp_rate; - *best_parent_rate = parent_rate; - *best_parent_hw = __clk_get_hw(parent); + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; } if (!best_rate) break; } - return best_rate; + if (best_rate < 0) + return best_rate; + + req->rate = best_rate; + return 0; } static int clk_programmable_set_parent(struct clk_hw *hw, u8 index) @@ -230,7 +231,6 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, { int num; u32 id; - int i; struct clk *clk; int num_parents; const char *parent_names[PROG_SOURCE_MAX]; @@ -241,11 +241,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX) return; - for (i = 0; i < num_parents; ++i) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); num = of_get_child_count(np); if (!num || num > (PROG_ID_MAX + 1)) diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 98a84a865..d0d5076a9 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c @@ -10,8 +10,10 @@ * */ +#include #include #include +#include #include #include #include @@ -371,17 +373,12 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np, const char *parent_names[2]; int num_parents; const char *name = np->name; - int i; num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > 2) return; - for (i = 0; i < num_parents; ++i) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, "clock-output-names", &name); @@ -449,17 +446,12 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, const char *parent_names[2]; int num_parents; const char *name = np->name; - int i; num_parents = of_clk_get_parent_count(np); if (num_parents != 2) return; - for (i = 0; i < num_parents; ++i) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, "clock-output-names", &name); diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c index 3817ea865..a7f8501cf 100644 --- a/drivers/clk/at91/clk-smd.c +++ b/drivers/clk/at91/clk-smd.c @@ -145,7 +145,6 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, struct at91_pmc *pmc) { struct clk *clk; - int i; int num_parents; const char *parent_names[SMD_SOURCE_MAX]; const char *name = np->name; @@ -154,11 +153,7 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX) return; - for (i = 0; i < num_parents; i++) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + 
of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, "clock-output-names", &name); diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index b0cbd2b1f..8ab850277 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -56,47 +56,43 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); } -static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, - unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_hw) +static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - struct clk *parent = NULL; + struct clk_hw *parent; long best_rate = -EINVAL; unsigned long tmp_rate; int best_diff = -1; int tmp_diff; int i; - for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { int div; - parent = clk_get_parent_by_index(hw->clk, i); + parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) { unsigned long tmp_parent_rate; - tmp_parent_rate = rate * div; - tmp_parent_rate = __clk_round_rate(parent, + tmp_parent_rate = req->rate * div; + tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); - if (tmp_rate < rate) - tmp_diff = rate - tmp_rate; + if (tmp_rate < req->rate) + tmp_diff = req->rate - tmp_rate; else - tmp_diff = tmp_rate - rate; + tmp_diff = tmp_rate - req->rate; if (best_diff < 0 || best_diff > tmp_diff) { best_rate = tmp_rate; best_diff = tmp_diff; - *best_parent_rate = tmp_parent_rate; - *best_parent_hw = __clk_get_hw(parent); + req->best_parent_rate = tmp_parent_rate; + req->best_parent_hw = parent; } - if (!best_diff || tmp_rate < rate) + if (!best_diff || tmp_rate < req->rate) break; } @@ -104,7 +100,11 @@ static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, break; } - return best_rate; + if (best_rate < 0) + return best_rate; + + req->rate = best_rate; + return 0; } static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) @@ -273,7 +273,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); - struct clk *parent = __clk_get_parent(hw->clk); + struct clk_hw *parent = clk_hw_get_parent(hw); unsigned long bestrate = 0; int bestdiff = -1; unsigned long tmprate; @@ -287,7 +287,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, continue; tmp_parent_rate = rate * usb->divisors[i]; - tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); + tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]); if (tmprate < rate) tmpdiff = rate - tmprate; @@ -373,7 +373,6 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, struct at91_pmc *pmc) { struct clk *clk; - int i; int num_parents; const char *parent_names[USB_SOURCE_MAX]; const char *name = np->name; @@ -382,11 +381,7 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, if (num_parents <= 0 || num_parents > USB_SOURCE_MAX) return; - for (i = 0; i < num_parents; i++) { - parent_names[i] = of_clk_get_parent_name(np, i); - if (!parent_names[i]) - return; - } + of_clk_parent_fill(np, parent_names, num_parents); of_property_read_string(np, 
"clock-output-names", &name); diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 39be2be82..d1844f1f3 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -125,7 +125,6 @@ static int pmc_irq_map(struct irq_domain *h, unsigned int virq, irq_set_chip_and_handler(virq, &pmc_irq, handle_level_irq); - set_irq_flags(virq, IRQF_VALID); irq_set_chip_data(virq, pmc); return 0; diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index eb8e5dc90..8b87771c6 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -59,71 +59,63 @@ static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value) int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); -extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91rm9200_clk_main_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_sama5d3_clk_pll_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_master_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np, - struct at91_pmc *pmc); - -extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np, - struct at91_pmc *pmc); - -#if defined(CONFIG_HAVE_AT91_UTMI) -extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np, - struct at91_pmc *pmc); -#endif - -#if defined(CONFIG_HAVE_AT91_USB_CLK) -extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, - struct at91_pmc *pmc); -extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np, - struct at91_pmc *pmc); -#endif - -#if defined(CONFIG_HAVE_AT91_SMD) -extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, - struct at91_pmc *pmc); -#endif - -#if defined(CONFIG_HAVE_AT91_H32MX) -extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, - struct at91_pmc *pmc); -#endif +void of_at91sam9260_clk_slow_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_main_osc_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, + 
struct at91_pmc *pmc); +void of_at91rm9200_clk_main_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_main_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_pll_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9g45_clk_pll_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9g20_clk_pllb_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_sama5d3_clk_pll_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_plldiv_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_master_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_master_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_sys_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_periph_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_periph_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_prog_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9g45_clk_prog_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_prog_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91sam9x5_clk_utmi_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91rm9200_clk_usb_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9x5_clk_usb_setup(struct device_node *np, + struct at91_pmc *pmc); +void of_at91sam9n12_clk_usb_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_at91sam9x5_clk_smd_setup(struct device_node *np, + struct at91_pmc *pmc); + +void of_sama5d4_clk_h32mx_setup(struct device_node *np, + struct at91_pmc *pmc); #endif /* __PMC_H_ */ diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index 2dda4e829..d679ab869 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -345,8 +345,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw, struct iproc_pll *pll = clk->pll; const struct iproc_pll_ctrl *ctrl = pll->ctrl; u32 val; - u64 ndiv; - unsigned int ndiv_int, ndiv_frac, pdiv; + u64 ndiv, ndiv_int, ndiv_frac; + unsigned int pdiv; if (parent_rate == 0) return 0; @@ -366,22 +366,19 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw, val = readl(pll->pll_base + ctrl->ndiv_int.offset); ndiv_int = (val >> ctrl->ndiv_int.shift) & bit_mask(ctrl->ndiv_int.width); - ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift; + ndiv = ndiv_int << 20; if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { val = readl(pll->pll_base + ctrl->ndiv_frac.offset); ndiv_frac = (val >> ctrl->ndiv_frac.shift) & bit_mask(ctrl->ndiv_frac.width); - - if (ndiv_frac != 0) - ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) | - ndiv_frac; + ndiv += ndiv_frac; } val = readl(pll->pll_base + ctrl->pdiv.offset); pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width); - clk->rate = (ndiv * parent_rate) >> ctrl->ndiv_int.shift; + clk->rate = (ndiv * parent_rate) >> 20; if (pdiv == 0) clk->rate *= 2; diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index 79a98506c..3a15347b4 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -16,6 +16,7 @@ #include #include +#include /* * "Policies" affect the frequencies of bus clocks provided by a @@ -1010,25 +1011,23 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, struct bcm_clk_div *div 
= &bcm_clk->u.peri->div; if (!divider_exists(div)) - return __clk_get_rate(hw->clk); + return clk_hw_get_rate(hw); /* Quietly avoid a zero rate */ return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, rate ? rate : 1, *parent_rate, NULL); } -static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, struct clk_hw **best_parent) +static int kona_peri_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct clk *clk = hw->clk; - struct clk *current_parent; + struct clk_hw *current_parent; unsigned long parent_rate; unsigned long best_delta; unsigned long best_rate; u32 parent_count; + long rate; u32 which; /* @@ -1037,18 +1036,25 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, */ WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT); parent_count = (u32)bcm_clk->init_data.num_parents; - if (parent_count < 2) - return kona_peri_clk_round_rate(hw, rate, best_parent_rate); + if (parent_count < 2) { + rate = kona_peri_clk_round_rate(hw, req->rate, + &req->best_parent_rate); + if (rate < 0) + return rate; + + req->rate = rate; + return 0; + } /* Unless we can do better, stick with current parent */ - current_parent = clk_get_parent(clk); - parent_rate = __clk_get_rate(current_parent); - best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); - best_delta = abs(best_rate - rate); + current_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(current_parent); + best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate); + best_delta = abs(best_rate - req->rate); /* Check whether any other parent clock can produce a better result */ for (which = 0; which < parent_count; which++) { - struct clk *parent = clk_get_parent_by_index(clk, which); + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which); unsigned long delta; unsigned long other_rate; @@ -1057,18 +1063,20 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, continue; /* We don't support CLK_SET_RATE_PARENT */ - parent_rate = __clk_get_rate(parent); - other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); - delta = abs(other_rate - rate); + parent_rate = clk_hw_get_rate(parent); + other_rate = kona_peri_clk_round_rate(hw, req->rate, + &parent_rate); + delta = abs(other_rate - req->rate); if (delta < best_delta) { best_delta = delta; best_rate = other_rate; - *best_parent = __clk_get_hw(parent); - *best_parent_rate = parent_rate; + req->best_parent_hw = parent; + req->best_parent_rate = parent_rate; } } - return best_rate; + req->rate = best_rate; + return 0; } static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) @@ -1130,7 +1138,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, if (parent_rate > (unsigned long)LONG_MAX) return -EINVAL; - if (rate == __clk_get_rate(hw->clk)) + if (rate == clk_hw_get_rate(hw)) return 0; if (!divider_exists(div)) @@ -1249,6 +1257,7 @@ bool __init kona_ccu_init(struct ccu_data *ccu) unsigned long flags; unsigned int which; struct clk **clks = ccu->clk_data.clks; + struct kona_clk *kona_clks = ccu->kona_clks; bool success = true; flags = ccu_lock(ccu); @@ -1259,7 +1268,7 @@ bool __init kona_ccu_init(struct ccu_data *ccu) if (!clks[which]) continue; - bcm_clk = to_kona_clk(__clk_get_hw(clks[which])); + bcm_clk = &kona_clks[which]; success &= __kona_clk_init(bcm_clk); } diff --git 
a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c index f4b8d324b..1c2294d3b 100644 --- a/drivers/clk/berlin/berlin2-pll.c +++ b/drivers/clk/berlin/berlin2-pll.c @@ -61,7 +61,7 @@ berlin2_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK; rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK; if (rfdiv == 0) { - pr_warn("%s has zero rfdiv\n", __clk_get_name(hw->clk)); + pr_warn("%s has zero rfdiv\n", clk_hw_get_name(hw)); rfdiv = 1; } @@ -70,7 +70,7 @@ berlin2_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) vcodiv = map->vcodiv[vcodivsel]; if (vcodiv == 0) { pr_warn("%s has zero vcodiv (index %d)\n", - __clk_get_name(hw->clk), vcodivsel); + clk_hw_get_name(hw), vcodivsel); vcodiv = 1; } diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index e619285c6..3bcd42fbb 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c index 6b950ca8b..dd295e498 100644 --- a/drivers/clk/clk-bcm2835.c +++ b/drivers/clk/clk-bcm2835.c @@ -32,11 +32,6 @@ void __init bcm2835_init_clocks(void) struct clk *clk; int ret; - clk = clk_register_fixed_rate(NULL, "sys_pclk", NULL, CLK_IS_ROOT, - 250000000); - if (IS_ERR(clk)) - pr_err("sys_pclk not registered\n"); - clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 126000000); if (IS_ERR(clk)) diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index f01164fad..01877f64e 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -309,7 +310,7 @@ static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate, if (!mul) div = CDCE706_DIVIDER_DIVIDER_MAX; - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { unsigned long best_diff = rate; unsigned long best_div = 0; struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent]; diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c index 85fafb41e..089bf88ff 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -10,6 +10,7 @@ * Copyright (C) 2014, Topic Embedded Products * Licenced under GPL */ +#include #include #include #include diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index 715eec1a9..ff4ef4f1d 100644 --- a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c @@ -9,7 +9,6 @@ * (at your option) any later version. 
*/ -#include #include #include #include diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index 616f5aef3..4735de066 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -55,78 +55,77 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw, return rate_ops->recalc_rate(rate_hw, parent_rate); } -static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +static int clk_composite_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_composite *composite = to_clk_composite(hw); const struct clk_ops *rate_ops = composite->rate_ops; const struct clk_ops *mux_ops = composite->mux_ops; struct clk_hw *rate_hw = composite->rate_hw; struct clk_hw *mux_hw = composite->mux_hw; - struct clk *parent; + struct clk_hw *parent; unsigned long parent_rate; long tmp_rate, best_rate = 0; unsigned long rate_diff; unsigned long best_rate_diff = ULONG_MAX; + long rate; int i; if (rate_hw && rate_ops && rate_ops->determine_rate) { __clk_hw_set_clk(rate_hw, hw); - return rate_ops->determine_rate(rate_hw, rate, min_rate, - max_rate, - best_parent_rate, - best_parent_p); + return rate_ops->determine_rate(rate_hw, req); } else if (rate_hw && rate_ops && rate_ops->round_rate && mux_hw && mux_ops && mux_ops->set_parent) { - *best_parent_p = NULL; + req->best_parent_hw = NULL; - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) { - parent = clk_get_parent(mux_hw->clk); - *best_parent_p = __clk_get_hw(parent); - *best_parent_rate = __clk_get_rate(parent); + if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) { + parent = clk_hw_get_parent(mux_hw); + req->best_parent_hw = parent; + req->best_parent_rate = clk_hw_get_rate(parent); - return rate_ops->round_rate(rate_hw, rate, - best_parent_rate); + rate = rate_ops->round_rate(rate_hw, req->rate, + &req->best_parent_rate); + if (rate < 0) + return rate; + + req->rate = rate; + return 0; } - for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) { - parent = clk_get_parent_by_index(mux_hw->clk, i); + for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) { + parent = clk_hw_get_parent_by_index(mux_hw, i); if (!parent) continue; - parent_rate = __clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); - tmp_rate = rate_ops->round_rate(rate_hw, rate, + tmp_rate = rate_ops->round_rate(rate_hw, req->rate, &parent_rate); if (tmp_rate < 0) continue; - rate_diff = abs(rate - tmp_rate); + rate_diff = abs(req->rate - tmp_rate); - if (!rate_diff || !*best_parent_p + if (!rate_diff || !req->best_parent_hw || best_rate_diff > rate_diff) { - *best_parent_p = __clk_get_hw(parent); - *best_parent_rate = parent_rate; + req->best_parent_hw = parent; + req->best_parent_rate = parent_rate; best_rate_diff = rate_diff; best_rate = tmp_rate; } if (!rate_diff) - return rate; + return 0; } - return best_rate; + req->rate = best_rate; + return 0; } else if (mux_hw && mux_ops && mux_ops->determine_rate) { __clk_hw_set_clk(mux_hw, hw); - return mux_ops->determine_rate(mux_hw, rate, min_rate, - max_rate, best_parent_rate, - best_parent_p); + return mux_ops->determine_rate(mux_hw, req); } else { pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n"); - return 0; + return -EINVAL; } } diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 706b5783c..f24d0a19a 100644 --- a/drivers/clk/clk-divider.c +++ 
b/drivers/clk/clk-divider.c @@ -78,12 +78,14 @@ static unsigned int _get_table_div(const struct clk_div_table *table, } static unsigned int _get_div(const struct clk_div_table *table, - unsigned int val, unsigned long flags) + unsigned int val, unsigned long flags, u8 width) { if (flags & CLK_DIVIDER_ONE_BASED) return val; if (flags & CLK_DIVIDER_POWER_OF_TWO) return 1 << val; + if (flags & CLK_DIVIDER_MAX_AT_ZERO) + return val ? val : div_mask(width) + 1; if (table) return _get_table_div(table, val); return val + 1; @@ -101,12 +103,14 @@ static unsigned int _get_table_val(const struct clk_div_table *table, } static unsigned int _get_val(const struct clk_div_table *table, - unsigned int div, unsigned long flags) + unsigned int div, unsigned long flags, u8 width) { if (flags & CLK_DIVIDER_ONE_BASED) return div; if (flags & CLK_DIVIDER_POWER_OF_TWO) return __ffs(div); + if (flags & CLK_DIVIDER_MAX_AT_ZERO) + return (div == div_mask(width) + 1) ? 0 : div; if (table) return _get_table_val(table, div); return div - 1; @@ -117,13 +121,14 @@ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, const struct clk_div_table *table, unsigned long flags) { + struct clk_divider *divider = to_clk_divider(hw); unsigned int div; - div = _get_div(table, val, flags); + div = _get_div(table, val, flags, divider->width); if (!div) { WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO), "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); return parent_rate; } @@ -285,7 +290,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, maxdiv = _get_maxdiv(table, width, flags); - if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; bestdiv = _div_round(table, parent_rate, rate, flags); bestdiv = bestdiv == 0 ? 
1 : bestdiv; @@ -311,7 +316,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, *best_parent_rate = parent_rate_saved; return i; } - parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), + parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * i); now = DIV_ROUND_UP(parent_rate, i); if (_is_best_div(rate, now, best, flags)) { @@ -323,7 +328,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!bestdiv) { bestdiv = _get_maxdiv(table, width, flags); - *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1); + *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1); } return bestdiv; @@ -351,7 +356,8 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_READ_ONLY) { bestdiv = readl(divider->reg) >> divider->shift; bestdiv &= div_mask(divider->width); - bestdiv = _get_div(divider->table, bestdiv, divider->flags); + bestdiv = _get_div(divider->table, bestdiv, divider->flags, + divider->width); return DIV_ROUND_UP(*prate, bestdiv); } @@ -370,7 +376,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate, if (!_is_valid_div(table, div, flags)) return -EINVAL; - value = _get_val(table, div, flags); + value = _get_val(table, div, flags, width); return min_t(unsigned int, value, div_mask(width)); } @@ -389,6 +395,8 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->lock) spin_lock_irqsave(divider->lock, flags); + else + __acquire(divider->lock); if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { val = div_mask(divider->width) << (divider->shift + 16); @@ -401,6 +409,8 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->lock) spin_unlock_irqrestore(divider->lock, flags); + else + __release(divider->lock); return 0; } diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index 73a8d0ff5..bac4553f0 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c @@ -6,7 +6,6 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. 
*/ -#include #include #include #include diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index fccabe497..83de57aec 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -41,12 +41,11 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate, { struct clk_fixed_factor *fix = to_clk_fixed_factor(hw); - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { unsigned long best_parent; best_parent = (rate / fix->mult) * fix->div; - *prate = __clk_round_rate(__clk_get_parent(hw->clk), - best_parent); + *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); } return (*prate / fix->div) * fix->mult; diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index 140eb5844..e85f856b8 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -27,11 +27,15 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, if (fd->lock) spin_lock_irqsave(fd->lock, flags); + else + __acquire(fd->lock); val = clk_readl(fd->reg); if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); + else + __release(fd->lock); m = (val & fd->mmask) >> fd->mshift; n = (val & fd->nmask) >> fd->nshift; @@ -80,6 +84,8 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, if (fd->lock) spin_lock_irqsave(fd->lock, flags); + else + __acquire(fd->lock); val = clk_readl(fd->reg); val &= ~(fd->mmask | fd->nmask); @@ -88,6 +94,8 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); + else + __release(fd->lock); return 0; } diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 551dd0672..de0b322f5 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -52,6 +52,8 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) if (gate->lock) spin_lock_irqsave(gate->lock, flags); + else + __acquire(gate->lock); if (gate->flags & CLK_GATE_HIWORD_MASK) { reg = BIT(gate->bit_idx + 16); @@ -70,6 +72,8 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) if (gate->lock) spin_unlock_irqrestore(gate->lock, flags); + else + __release(gate->lock); } static int clk_gate_enable(struct clk_hw *hw) diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c deleted file mode 100644 index f564e624f..000000000 --- a/drivers/clk/clk-gpio-gate.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com - * Author: Jyri Sarha - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Gpio gated clock implementation - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/** - * DOC: basic gpio gated clock which can be enabled and disabled - * with gpio output - * Traits of this clock: - * prepare - clk_(un)prepare only ensures parent is (un)prepared - * enable - clk_enable and clk_disable are functional & control gpio - * rate - inherits rate from parent. No clk_set_rate support - * parent - fixed parent. 
No clk_set_parent support - */ - -#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw) - -static int clk_gpio_gate_enable(struct clk_hw *hw) -{ - struct clk_gpio *clk = to_clk_gpio(hw); - - gpiod_set_value(clk->gpiod, 1); - - return 0; -} - -static void clk_gpio_gate_disable(struct clk_hw *hw) -{ - struct clk_gpio *clk = to_clk_gpio(hw); - - gpiod_set_value(clk->gpiod, 0); -} - -static int clk_gpio_gate_is_enabled(struct clk_hw *hw) -{ - struct clk_gpio *clk = to_clk_gpio(hw); - - return gpiod_get_value(clk->gpiod); -} - -const struct clk_ops clk_gpio_gate_ops = { - .enable = clk_gpio_gate_enable, - .disable = clk_gpio_gate_disable, - .is_enabled = clk_gpio_gate_is_enabled, -}; -EXPORT_SYMBOL_GPL(clk_gpio_gate_ops); - -/** - * clk_register_gpio - register a gpip clock with the clock framework - * @dev: device that is registering this clock - * @name: name of this clock - * @parent_name: name of this clock's parent - * @gpio: gpio number to gate this clock - * @active_low: true if gpio should be set to 0 to enable clock - * @flags: clock flags - */ -struct clk *clk_register_gpio_gate(struct device *dev, const char *name, - const char *parent_name, unsigned gpio, bool active_low, - unsigned long flags) -{ - struct clk_gpio *clk_gpio = NULL; - struct clk *clk = ERR_PTR(-EINVAL); - struct clk_init_data init = { NULL }; - unsigned long gpio_flags; - int err; - - if (active_low) - gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH; - else - gpio_flags = GPIOF_OUT_INIT_LOW; - - if (dev) - err = devm_gpio_request_one(dev, gpio, gpio_flags, name); - else - err = gpio_request_one(gpio, gpio_flags, name); - - if (err) { - pr_err("%s: %s: Error requesting clock control gpio %u\n", - __func__, name, gpio); - return ERR_PTR(err); - } - - if (dev) - clk_gpio = devm_kzalloc(dev, sizeof(struct clk_gpio), - GFP_KERNEL); - else - clk_gpio = kzalloc(sizeof(struct clk_gpio), GFP_KERNEL); - - if (!clk_gpio) { - clk = ERR_PTR(-ENOMEM); - goto clk_register_gpio_gate_err; - } - - init.name = name; - init.ops = &clk_gpio_gate_ops; - init.flags = flags | CLK_IS_BASIC; - init.parent_names = (parent_name ? &parent_name : NULL); - init.num_parents = (parent_name ? 1 : 0); - - clk_gpio->gpiod = gpio_to_desc(gpio); - clk_gpio->hw.init = &init; - - clk = clk_register(dev, &clk_gpio->hw); - - if (!IS_ERR(clk)) - return clk; - - if (!dev) - kfree(clk_gpio); - -clk_register_gpio_gate_err: - if (!dev) - gpio_free(gpio); - - return clk; -} -EXPORT_SYMBOL_GPL(clk_register_gpio_gate); - -#ifdef CONFIG_OF -/** - * The clk_register_gpio_gate has to be delayed, because the EPROBE_DEFER - * can not be handled properly at of_clk_init() call time. 
- */ - -struct clk_gpio_gate_delayed_register_data { - struct device_node *node; - struct mutex lock; - struct clk *clk; -}; - -static struct clk *of_clk_gpio_gate_delayed_register_get( - struct of_phandle_args *clkspec, - void *_data) -{ - struct clk_gpio_gate_delayed_register_data *data = _data; - struct clk *clk; - const char *clk_name = data->node->name; - const char *parent_name; - int gpio; - enum of_gpio_flags of_flags; - - mutex_lock(&data->lock); - - if (data->clk) { - mutex_unlock(&data->lock); - return data->clk; - } - - gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0, - &of_flags); - if (gpio < 0) { - mutex_unlock(&data->lock); - if (gpio != -EPROBE_DEFER) - pr_err("%s: %s: Can't get 'enable-gpios' DT property\n", - __func__, clk_name); - return ERR_PTR(gpio); - } - - parent_name = of_clk_get_parent_name(data->node, 0); - - clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpio, - of_flags & OF_GPIO_ACTIVE_LOW, 0); - if (IS_ERR(clk)) { - mutex_unlock(&data->lock); - return clk; - } - - data->clk = clk; - mutex_unlock(&data->lock); - - return clk; -} - -/** - * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock - */ -static void __init of_gpio_gate_clk_setup(struct device_node *node) -{ - struct clk_gpio_gate_delayed_register_data *data; - - data = kzalloc(sizeof(struct clk_gpio_gate_delayed_register_data), - GFP_KERNEL); - if (!data) - return; - - data->node = node; - mutex_init(&data->lock); - - of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data); -} -CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup); -#endif diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c new file mode 100644 index 000000000..10819e248 --- /dev/null +++ b/drivers/clk/clk-gpio.c @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: + * Jyri Sarha + * Sergej Sawazki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Gpio controlled clock implementation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: basic gpio gated clock which can be enabled and disabled + * with gpio output + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control gpio + * rate - inherits rate from parent. No clk_set_rate support + * parent - fixed parent. 
No clk_set_parent support + */ + +#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw) + +static int clk_gpio_gate_enable(struct clk_hw *hw) +{ + struct clk_gpio *clk = to_clk_gpio(hw); + + gpiod_set_value(clk->gpiod, 1); + + return 0; +} + +static void clk_gpio_gate_disable(struct clk_hw *hw) +{ + struct clk_gpio *clk = to_clk_gpio(hw); + + gpiod_set_value(clk->gpiod, 0); +} + +static int clk_gpio_gate_is_enabled(struct clk_hw *hw) +{ + struct clk_gpio *clk = to_clk_gpio(hw); + + return gpiod_get_value(clk->gpiod); +} + +const struct clk_ops clk_gpio_gate_ops = { + .enable = clk_gpio_gate_enable, + .disable = clk_gpio_gate_disable, + .is_enabled = clk_gpio_gate_is_enabled, +}; +EXPORT_SYMBOL_GPL(clk_gpio_gate_ops); + +/** + * DOC: basic clock multiplexer which can be controlled with a gpio output + * Traits of this clock: + * prepare - clk_prepare only ensures that parents are prepared + * rate - rate is only affected by parent switching. No clk_set_rate support + * parent - parent is adjustable through clk_set_parent + */ + +static u8 clk_gpio_mux_get_parent(struct clk_hw *hw) +{ + struct clk_gpio *clk = to_clk_gpio(hw); + + return gpiod_get_value(clk->gpiod); +} + +static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_gpio *clk = to_clk_gpio(hw); + + gpiod_set_value(clk->gpiod, index); + + return 0; +} + +const struct clk_ops clk_gpio_mux_ops = { + .get_parent = clk_gpio_mux_get_parent, + .set_parent = clk_gpio_mux_set_parent, + .determine_rate = __clk_mux_determine_rate, +}; +EXPORT_SYMBOL_GPL(clk_gpio_mux_ops); + +static struct clk *clk_register_gpio(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags, + const struct clk_ops *clk_gpio_ops) +{ + struct clk_gpio *clk_gpio; + struct clk *clk; + struct clk_init_data init = {}; + unsigned long gpio_flags; + int err; + + if (dev) + clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL); + else + clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL); + + if (!clk_gpio) + return ERR_PTR(-ENOMEM); + + if (active_low) + gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH; + else + gpio_flags = GPIOF_OUT_INIT_LOW; + + if (dev) + err = devm_gpio_request_one(dev, gpio, gpio_flags, name); + else + err = gpio_request_one(gpio, gpio_flags, name); + if (err) { + if (err != -EPROBE_DEFER) + pr_err("%s: %s: Error requesting clock control gpio %u\n", + __func__, name, gpio); + if (!dev) + kfree(clk_gpio); + + return ERR_PTR(err); + } + + init.name = name; + init.ops = clk_gpio_ops; + init.flags = flags | CLK_IS_BASIC; + init.parent_names = parent_names; + init.num_parents = num_parents; + + clk_gpio->gpiod = gpio_to_desc(gpio); + clk_gpio->hw.init = &init; + + if (dev) + clk = devm_clk_register(dev, &clk_gpio->hw); + else + clk = clk_register(NULL, &clk_gpio->hw); + + if (!IS_ERR(clk)) + return clk; + + if (!dev) { + gpiod_put(clk_gpio->gpiod); + kfree(clk_gpio); + } + + return clk; +} + +/** + * clk_register_gpio_gate - register a gpio clock gate with the clock framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_name: name of this clock's parent + * @gpio: gpio number to gate this clock + * @active_low: true if gpio should be set to 0 to enable clock + * @flags: clock flags + */ +struct clk *clk_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, unsigned gpio, bool active_low, + unsigned long flags) +{ + return clk_register_gpio(dev, name, + 
(parent_name ? &parent_name : NULL), + (parent_name ? 1 : 0), gpio, active_low, flags, + &clk_gpio_gate_ops); +} +EXPORT_SYMBOL_GPL(clk_register_gpio_gate); + +/** + * clk_register_gpio_mux - register a gpio clock mux with the clock framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_names: names of this clock's parents + * @num_parents: number of parents listed in @parent_names + * @gpio: gpio number to select this clock's parent + * @active_low: true if gpio should be set to 0 to enable clock + * @flags: clock flags + */ +struct clk *clk_register_gpio_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low, unsigned long flags) +{ + if (num_parents != 2) { + pr_err("mux-clock %s must have 2 parents\n", name); + return ERR_PTR(-EINVAL); + } + + return clk_register_gpio(dev, name, parent_names, num_parents, + gpio, active_low, flags, &clk_gpio_mux_ops); +} +EXPORT_SYMBOL_GPL(clk_register_gpio_mux); + +#ifdef CONFIG_OF +/** + * clk_register_get() has to be delayed, because -EPROBE_DEFER + * can not be handled properly at of_clk_init() call time. + */ + +struct clk_gpio_delayed_register_data { + const char *gpio_name; + struct device_node *node; + struct mutex lock; + struct clk *clk; + struct clk *(*clk_register_get)(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned gpio, bool active_low); +}; + +static struct clk *of_clk_gpio_delayed_register_get( + struct of_phandle_args *clkspec, void *_data) +{ + struct clk_gpio_delayed_register_data *data = _data; + struct clk *clk; + const char **parent_names; + int i, num_parents; + int gpio; + enum of_gpio_flags of_flags; + + mutex_lock(&data->lock); + + if (data->clk) { + mutex_unlock(&data->lock); + return data->clk; + } + + gpio = of_get_named_gpio_flags(data->node, data->gpio_name, 0, + &of_flags); + if (gpio < 0) { + mutex_unlock(&data->lock); + if (gpio == -EPROBE_DEFER) + pr_debug("%s: %s: GPIOs not yet available, retry later\n", + data->node->name, __func__); + else + pr_err("%s: %s: Can't get '%s' DT property\n", + data->node->name, __func__, + data->gpio_name); + return ERR_PTR(gpio); + } + + num_parents = of_clk_get_parent_count(data->node); + + parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL); + if (!parent_names) { + clk = ERR_PTR(-ENOMEM); + goto out; + } + + for (i = 0; i < num_parents; i++) + parent_names[i] = of_clk_get_parent_name(data->node, i); + + clk = data->clk_register_get(data->node->name, parent_names, + num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW); + if (IS_ERR(clk)) + goto out; + + data->clk = clk; +out: + mutex_unlock(&data->lock); + kfree(parent_names); + + return clk; +} + +static struct clk *of_clk_gpio_gate_delayed_register_get(const char *name, + const char * const *parent_names, u8 num_parents, + unsigned gpio, bool active_low) +{ + return clk_register_gpio_gate(NULL, name, parent_names[0], + gpio, active_low, 0); +} + +static struct clk *of_clk_gpio_mux_delayed_register_get(const char *name, + const char * const *parent_names, u8 num_parents, unsigned gpio, + bool active_low) +{ + return clk_register_gpio_mux(NULL, name, parent_names, num_parents, + gpio, active_low, 0); +} + +static void __init of_gpio_clk_setup(struct device_node *node, + const char *gpio_name, + struct clk *(*clk_register_get)(const char *name, + const char * const *parent_names, + u8 num_parents, + unsigned gpio, bool active_low)) +{ + struct clk_gpio_delayed_register_data *data; + 
data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + data->node = node; + data->gpio_name = gpio_name; + data->clk_register_get = clk_register_get; + mutex_init(&data->lock); + + of_clk_add_provider(node, of_clk_gpio_delayed_register_get, data); +} + +static void __init of_gpio_gate_clk_setup(struct device_node *node) +{ + of_gpio_clk_setup(node, "enable-gpios", + of_clk_gpio_gate_delayed_register_get); +} +CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup); + +void __init of_gpio_mux_clk_setup(struct device_node *node) +{ + of_gpio_clk_setup(node, "select-gpios", + of_clk_gpio_mux_delayed_register_get); +} +CLK_OF_DECLARE(gpio_mux_clk, "gpio-mux-clock", of_gpio_mux_clk_setup); +#endif diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c index 2e7e9d979..be3a21abb 100644 --- a/drivers/clk/clk-highbank.c +++ b/drivers/clk/clk-highbank.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c index 5181b89c3..f37f71964 100644 --- a/drivers/clk/clk-moxart.c +++ b/drivers/clk/clk-moxart.c @@ -10,6 +10,7 @@ * warranty of any kind, whether express or implied. */ +#include #include #include #include diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 6066a01b2..7129c86a7 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -10,7 +10,6 @@ * Simple multiplexer clock implementation */ -#include #include #include #include @@ -32,7 +31,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw) { struct clk_mux *mux = to_clk_mux(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 val; /* @@ -85,6 +84,8 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) if (mux->lock) spin_lock_irqsave(mux->lock, flags); + else + __acquire(mux->lock); if (mux->flags & CLK_MUX_HIWORD_MASK) { val = mux->mask << (mux->shift + 16); @@ -97,6 +98,8 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) if (mux->lock) spin_unlock_irqrestore(mux->lock, flags); + else + __release(mux->lock); return 0; } diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index c9487179f..e4d8a991c 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -8,8 +8,7 @@ #define pr_fmt(fmt) "Nomadik SRC clocks: " fmt #include -#include -#include +#include #include #include #include diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c index 45a535ab4..8e3039f0c 100644 --- a/drivers/clk/clk-palmas.c +++ b/drivers/clk/clk-palmas.c @@ -18,7 +18,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c index 83902b9cd..0fee2f4ca 100644 --- a/drivers/clk/clk-rk808.c +++ b/drivers/clk/clk-rk808.c @@ -15,7 +15,6 @@ * more details. 
*/ -#include #include #include #include diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 9b13a303d..d266299df 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -58,21 +58,17 @@ static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw) static int s2mps11_clk_prepare(struct clk_hw *hw) { struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); - int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, + return regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg, s2mps11->mask, s2mps11->mask); - - return ret; } static void s2mps11_clk_unprepare(struct clk_hw *hw) { struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); - int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg, + regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg, s2mps11->mask, ~s2mps11->mask); } @@ -186,15 +182,15 @@ static int s2mps11_clk_probe(struct platform_device *pdev) struct clk_init_data *clks_init; int i, ret = 0; - s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) * - S2MPS11_CLKS_NUM, GFP_KERNEL); + s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM, + sizeof(*s2mps11_clk), GFP_KERNEL); if (!s2mps11_clks) return -ENOMEM; s2mps11_clk = s2mps11_clks; - clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) * - S2MPS11_CLKS_NUM, GFP_KERNEL); + clk_table = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM, + sizeof(struct clk *), GFP_KERNEL); if (!clk_table) return -ENOMEM; @@ -246,7 +242,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) s2mps11_name(s2mps11_clk), NULL); if (!s2mps11_clk->lookup) { ret = -ENOMEM; - goto err_lup; + goto err_reg; } } @@ -265,16 +261,10 @@ static int s2mps11_clk_probe(struct platform_device *pdev) platform_set_drvdata(pdev, s2mps11_clks); return ret; -err_lup: - devm_clk_unregister(&pdev->dev, s2mps11_clk->clk); + err_reg: - while (s2mps11_clk > s2mps11_clks) { - if (s2mps11_clk->lookup) { - clkdev_drop(s2mps11_clk->lookup); - devm_clk_unregister(&pdev->dev, s2mps11_clk->clk); - } - s2mps11_clk--; - } + while (--i >= 0) + clkdev_drop(s2mps11_clks[i].lookup); return ret; } @@ -322,7 +312,7 @@ static int __init s2mps11_clk_init(void) } subsys_initcall(s2mps11_clk_init); -static void __init s2mps11_clk_cleanup(void) +static void __exit s2mps11_clk_cleanup(void) { platform_driver_unregister(&s2mps11_clk_driver); } diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index e39e1e680..5596c0aac 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include @@ -439,7 +439,7 @@ static unsigned long si5351_pll_recalc_rate(struct clk_hw *hw, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), + __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, parent_rate, (unsigned long)rate); @@ -497,7 +497,7 @@ static long si5351_pll_round_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), a, b, c, + __func__, clk_hw_get_name(hw), a, b, c, *parent_rate, rate); return rate; @@ -521,7 +521,7 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), 
+ __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, parent_rate, rate); @@ -632,7 +632,7 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), + __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, m, parent_rate, (unsigned long)rate); @@ -663,7 +663,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, divby4 = 1; /* multisync can set pll */ - if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { /* * find largest integer divider for max * vco frequency and given target rate @@ -745,7 +745,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), a, b, c, divby4, + __func__, clk_hw_get_name(hw), a, b, c, divby4, *parent_rate, rate); return rate; @@ -777,7 +777,7 @@ static int si5351_msynth_set_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), + __func__, clk_hw_get_name(hw), hwdata->params.p1, hwdata->params.p2, hwdata->params.p3, divby4, parent_rate, rate); @@ -1013,7 +1013,7 @@ static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate, rate = SI5351_CLKOUT_MIN_FREQ; /* request frequency if multisync master */ - if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { /* use r divider for frequencies below 1MHz */ rdiv = SI5351_OUTPUT_CLK_DIV_1; while (rate < SI5351_MULTISYNTH_MIN_FREQ && @@ -1042,7 +1042,7 @@ static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv), + __func__, clk_hw_get_name(hw), (1 << rdiv), *parent_rate, rate); return rate; @@ -1093,7 +1093,7 @@ static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate, dev_dbg(&hwdata->drvdata->client->dev, "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n", - __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv), + __func__, clk_hw_get_name(hw), (1 << rdiv), parent_rate, rate); return 0; diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c index 20a5aec98..cf478aa9f 100644 --- a/drivers/clk/clk-si570.c +++ b/drivers/clk/clk-si570.c @@ -19,6 +19,7 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 3f6f7ad39..fd89e7711 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -175,11 +175,10 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate, if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx)) mult = 2; - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { unsigned long best_parent = rate / mult; - *prate = - __clk_round_rate(__clk_get_parent(hw->clk), best_parent); + *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); } return *prate * mult; diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c index 4a755135b..8e5ed649a 100644 --- a/drivers/clk/clk-twl6040.c +++ b/drivers/clk/clk-twl6040.c @@ -20,7 +20,6 @@ * */ -#include #include #include #include @@ -91,7 +90,7 @@ static int twl6040_clk_probe(struct platform_device *pdev) clkdata->twl6040 = twl6040; clkdata->mcpdm_fclk.init = &wm831x_clkout_init; - clkdata->clk = clk_register(&pdev->dev, &clkdata->mcpdm_fclk); + clkdata->clk = devm_clk_register(&pdev->dev, &clkdata->mcpdm_fclk); if (IS_ERR(clkdata->clk)) return PTR_ERR(clkdata->clk); @@ -100,21 +99,11 @@ static int twl6040_clk_probe(struct platform_device *pdev) return 0; } -static int twl6040_clk_remove(struct platform_device *pdev) -{ - struct twl6040_clk *clkdata = platform_get_drvdata(pdev); - - clk_unregister(clkdata->clk); - - return 0; -} - static struct platform_driver twl6040_clk_driver = { .driver = { .name = "twl6040-clk", }, .probe = twl6040_clk_probe, - .remove = twl6040_clk_remove, }; module_platform_driver(twl6040_clk_driver); diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c index 18bf5e576..95d1742da 100644 --- a/drivers/clk/clk-u300.c +++ b/drivers/clk/clk-u300.c @@ -5,8 +5,8 @@ * Author: Linus Walleij * Author: Jonas Aaberg */ -#include #include +#include #include #include #include diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c index ef67719f4..43f9d1525 100644 --- a/drivers/clk/clk-wm831x.c +++ b/drivers/clk/clk-wm831x.c @@ -12,7 +12,6 @@ * */ -#include #include #include #include diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index f26b3ac36..96a6190ac 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -60,7 +60,6 @@ enum xgene_pll_type { struct xgene_clk_pll { struct clk_hw hw; - const char *name; void __iomem *reg; spinlock_t *lock; u32 pll_offset; @@ -75,7 +74,7 @@ static int xgene_clk_pll_is_enabled(struct clk_hw *hw) u32 data; data = xgene_clk_read(pllclk->reg + pllclk->pll_offset); - pr_debug("%s pll %s\n", pllclk->name, + pr_debug("%s pll %s\n", clk_hw_get_name(hw), data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled"); return data & REGSPEC_RESET_F1_MASK ? 0 : 1; @@ -113,7 +112,7 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw, fref = parent_rate / nref; fvco = fref * nfb; } - pr_debug("%s pll recalc rate %ld parent %ld\n", pllclk->name, + pr_debug("%s pll recalc rate %ld parent %ld\n", clk_hw_get_name(hw), fvco / nout, parent_rate); return fvco / nout; @@ -146,7 +145,6 @@ static struct clk *xgene_register_clk_pll(struct device *dev, init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 
1 : 0; - apmclk->name = name; apmclk->reg = reg; apmclk->lock = lock; apmclk->pll_offset = pll_offset; @@ -210,7 +208,6 @@ struct xgene_dev_parameters { struct xgene_clk { struct clk_hw hw; - const char *name; spinlock_t *lock; struct xgene_dev_parameters param; }; @@ -228,7 +225,7 @@ static int xgene_clk_enable(struct clk_hw *hw) spin_lock_irqsave(pclk->lock, flags); if (pclk->param.csr_reg != NULL) { - pr_debug("%s clock enabled\n", pclk->name); + pr_debug("%s clock enabled\n", clk_hw_get_name(hw)); reg = __pa(pclk->param.csr_reg); /* First enable the clock */ data = xgene_clk_read(pclk->param.csr_reg + @@ -237,7 +234,7 @@ static int xgene_clk_enable(struct clk_hw *hw) xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_clk_offset); pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", - pclk->name, ®, + clk_hw_get_name(hw), ®, pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, data); @@ -248,7 +245,7 @@ static int xgene_clk_enable(struct clk_hw *hw) xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_csr_offset); pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", - pclk->name, ®, + clk_hw_get_name(hw), ®, pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, data); } @@ -269,7 +266,7 @@ static void xgene_clk_disable(struct clk_hw *hw) spin_lock_irqsave(pclk->lock, flags); if (pclk->param.csr_reg != NULL) { - pr_debug("%s clock disabled\n", pclk->name); + pr_debug("%s clock disabled\n", clk_hw_get_name(hw)); /* First put the CSR in reset */ data = xgene_clk_read(pclk->param.csr_reg + pclk->param.reg_csr_offset); @@ -295,10 +292,10 @@ static int xgene_clk_is_enabled(struct clk_hw *hw) u32 data = 0; if (pclk->param.csr_reg != NULL) { - pr_debug("%s clock checking\n", pclk->name); + pr_debug("%s clock checking\n", clk_hw_get_name(hw)); data = xgene_clk_read(pclk->param.csr_reg + pclk->param.reg_clk_offset); - pr_debug("%s clock is %s\n", pclk->name, + pr_debug("%s clock is %s\n", clk_hw_get_name(hw), data & pclk->param.reg_clk_mask ? "enabled" : "disabled"); } @@ -321,11 +318,13 @@ static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw, data &= (1 << pclk->param.reg_divider_width) - 1; pr_debug("%s clock recalc rate %ld parent %ld\n", - pclk->name, parent_rate / data, parent_rate); + clk_hw_get_name(hw), + parent_rate / data, parent_rate); + return parent_rate / data; } else { pr_debug("%s clock recalc rate %ld parent %ld\n", - pclk->name, parent_rate, parent_rate); + clk_hw_get_name(hw), parent_rate, parent_rate); return parent_rate; } } @@ -357,7 +356,7 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate, data |= divider; xgene_clk_write(data, pclk->param.divider_reg + pclk->param.reg_divider_offset); - pr_debug("%s clock set rate %ld\n", pclk->name, + pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw), parent_rate / divider_save); } else { divider_save = 1; @@ -419,7 +418,6 @@ static struct clk *xgene_register_clk(struct device *dev, init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 1 : 0; - apmclk->name = name; apmclk->lock = lock; apmclk->hw.init = &init; apmclk->param = *parameters; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ddb4b5410..0ebcf4497 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -9,6 +9,7 @@ * Standard functionality for the common clock API. 
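[A note on the conversions above: the clk-si5351, clk-stm32f4 and clk-xgene hunks all drop driver-private name caching and consumer-handle helpers such as __clk_get_name(hw->clk) in favour of the clk_hw accessors that clk.c introduces below. A minimal provider-side sketch of the new calls; the foo_recalc_rate callback and its debug output are hypothetical, while the clk_hw_* functions are the ones added in this patch:

#include <linux/clk-provider.h>

static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	/* formerly __clk_get_name(hw->clk) / __clk_get_flags(hw->clk) */
	pr_debug("%s: flags 0x%lx, %u parents, parent %s\n",
		 clk_hw_get_name(hw), clk_hw_get_flags(hw),
		 clk_hw_get_num_parents(hw),
		 parent ? clk_hw_get_name(parent) : "orphan");

	return parent_rate;
}
]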
See Documentation/clk.txt */ +#include #include #include #include @@ -56,8 +57,11 @@ struct clk_core { struct clk_core *new_parent; struct clk_core *new_child; unsigned long flags; + bool orphan; unsigned int enable_count; unsigned int prepare_count; + unsigned long min_rate; + unsigned long max_rate; unsigned long accuracy; int phase; struct hlist_head children; @@ -111,12 +115,14 @@ static void clk_prepare_unlock(void) } static unsigned long clk_enable_lock(void) + __acquires(enable_lock) { unsigned long flags; if (!spin_trylock_irqsave(&enable_lock, flags)) { if (enable_owner == current) { enable_refcnt++; + __acquire(enable_lock); return flags; } spin_lock_irqsave(&enable_lock, flags); @@ -129,12 +135,15 @@ static unsigned long clk_enable_lock(void) } static void clk_enable_unlock(unsigned long flags) + __releases(enable_lock) { WARN_ON_ONCE(enable_owner != current); WARN_ON_ONCE(enable_refcnt == 0); - if (--enable_refcnt) + if (--enable_refcnt) { + __release(enable_lock); return; + } enable_owner = NULL; spin_unlock_irqrestore(&enable_lock, flags); } @@ -269,27 +278,29 @@ const char *__clk_get_name(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_get_name); +const char *clk_hw_get_name(const struct clk_hw *hw) +{ + return hw->core->name; +} +EXPORT_SYMBOL_GPL(clk_hw_get_name); + struct clk_hw *__clk_get_hw(struct clk *clk) { return !clk ? NULL : clk->core->hw; } EXPORT_SYMBOL_GPL(__clk_get_hw); -u8 __clk_get_num_parents(struct clk *clk) +unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) { - return !clk ? 0 : clk->core->num_parents; + return hw->core->num_parents; } -EXPORT_SYMBOL_GPL(__clk_get_num_parents); +EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); -struct clk *__clk_get_parent(struct clk *clk) +struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) { - if (!clk) - return NULL; - - /* TODO: Create a per-user clk and change callers to call clk_put */ - return !clk->core->parent ? NULL : clk->core->parent->hw->clk; + return hw->core->parent ? hw->core->parent->hw : NULL; } -EXPORT_SYMBOL_GPL(__clk_get_parent); +EXPORT_SYMBOL_GPL(clk_hw_get_parent); static struct clk_core *__clk_lookup_subtree(const char *name, struct clk_core *core) @@ -348,18 +359,16 @@ static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, return core->parents[index]; } -struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) +struct clk_hw * +clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) { struct clk_core *parent; - if (!clk) - return NULL; - - parent = clk_core_get_parent_by_index(clk->core, index); + parent = clk_core_get_parent_by_index(hw->core, index); - return !parent ? NULL : parent->hw->clk; + return !parent ? 
NULL : parent->hw; } -EXPORT_SYMBOL_GPL(clk_get_parent_by_index); +EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); unsigned int __clk_get_enable_count(struct clk *clk) { @@ -387,14 +396,11 @@ out: return ret; } -unsigned long __clk_get_rate(struct clk *clk) +unsigned long clk_hw_get_rate(const struct clk_hw *hw) { - if (!clk) - return 0; - - return clk_core_get_rate_nolock(clk->core); + return clk_core_get_rate_nolock(hw->core); } -EXPORT_SYMBOL_GPL(__clk_get_rate); +EXPORT_SYMBOL_GPL(clk_hw_get_rate); static unsigned long __clk_get_accuracy(struct clk_core *core) { @@ -410,12 +416,15 @@ unsigned long __clk_get_flags(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_get_flags); -bool __clk_is_prepared(struct clk *clk) +unsigned long clk_hw_get_flags(const struct clk_hw *hw) { - if (!clk) - return false; + return hw->core->flags; +} +EXPORT_SYMBOL_GPL(clk_hw_get_flags); - return clk_core_is_prepared(clk->core); +bool clk_hw_is_prepared(const struct clk_hw *hw) +{ + return clk_core_is_prepared(hw->core); } bool __clk_is_enabled(struct clk *clk) @@ -436,28 +445,31 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } -static long -clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p, +static int +clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, unsigned long flags) { struct clk_core *core = hw->core, *parent, *best_parent = NULL; - int i, num_parents; - unsigned long parent_rate, best = 0; + int i, num_parents, ret; + unsigned long best = 0; + struct clk_rate_request parent_req = *req; /* if NO_REPARENT flag set, pass through to current parent */ if (core->flags & CLK_SET_RATE_NO_REPARENT) { parent = core->parent; - if (core->flags & CLK_SET_RATE_PARENT) - best = __clk_determine_rate(parent ? parent->hw : NULL, - rate, min_rate, max_rate); - else if (parent) + if (core->flags & CLK_SET_RATE_PARENT) { + ret = __clk_determine_rate(parent ? 
parent->hw : NULL, + &parent_req); + if (ret) + return ret; + + best = parent_req.rate; + } else if (parent) { best = clk_core_get_rate_nolock(parent); - else + } else { best = clk_core_get_rate_nolock(core); + } + goto out; } @@ -467,24 +479,33 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, parent = clk_core_get_parent_by_index(core, i); if (!parent) continue; - if (core->flags & CLK_SET_RATE_PARENT) - parent_rate = __clk_determine_rate(parent->hw, rate, - min_rate, - max_rate); - else - parent_rate = clk_core_get_rate_nolock(parent); - if (mux_is_better_rate(rate, parent_rate, best, flags)) { + + if (core->flags & CLK_SET_RATE_PARENT) { + parent_req = *req; + ret = __clk_determine_rate(parent->hw, &parent_req); + if (ret) + continue; + } else { + parent_req.rate = clk_core_get_rate_nolock(parent); + } + + if (mux_is_better_rate(req->rate, parent_req.rate, + best, flags)) { best_parent = parent; - best = parent_rate; + best = parent_req.rate; } } + if (!best_parent) + return -EINVAL; + out: if (best_parent) - *best_parent_p = best_parent->hw; - *best_parent_rate = best; + req->best_parent_hw = best_parent->hw; + req->best_parent_rate = best; + req->rate = best; - return best; + return 0; } struct clk *__clk_lookup(const char *name) @@ -500,8 +521,8 @@ static void clk_core_get_boundaries(struct clk_core *core, { struct clk *clk_user; - *min_rate = 0; - *max_rate = ULONG_MAX; + *min_rate = core->min_rate; + *max_rate = core->max_rate; hlist_for_each_entry(clk_user, &core->clks, clks_node) *min_rate = max(*min_rate, clk_user->min_rate); @@ -510,33 +531,30 @@ static void clk_core_get_boundaries(struct clk_core *core, *max_rate = min(*max_rate, clk_user->max_rate); } +void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, + unsigned long max_rate) +{ + hw->core->min_rate = min_rate; + hw->core->max_rate = max_rate; +} +EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); + /* * Helper for finding best parent to provide a given frequency. This can be used * directly as a determine_rate callback (e.g. for a mux), or from a more * complex clock that may combine a mux with other operations. 
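[This hunk is the heart of the ->determine_rate() prototype change: instead of returning the chosen rate as a long and filling best_parent_rate/best_parent_p out-parameters, callbacks now receive a struct clk_rate_request carrying the target rate, the min/max boundaries and the parent choice, and report failure through an int. A minimal sketch of a driver callback written against the new prototype; the clock and its 1 kHz step are hypothetical:

static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	/* hypothetical hardware: rates programmable in 1 kHz steps */
	unsigned long best = rounddown(req->rate, 1000);

	if (best < req->min_rate || best > req->max_rate)
		return -EINVAL;

	/*
	 * req->best_parent_hw and req->best_parent_rate arrive prefilled
	 * by the core (see clk_core_round_rate_nolock() below) and could
	 * be adjusted here if the clock were to re-parent.
	 */
	req->rate = best;
	return 0;
}
]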
*/ -long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +int __clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, - best_parent_rate, - best_parent_p, 0); + return clk_mux_determine_rate_flags(hw, req, 0); } EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); -long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +int __clk_mux_determine_rate_closest(struct clk_hw *hw, + struct clk_rate_request *req) { - return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, - best_parent_rate, - best_parent_p, - CLK_MUX_ROUND_CLOSEST); + return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); } EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); @@ -759,14 +777,11 @@ int clk_enable(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_enable); -static unsigned long clk_core_round_rate_nolock(struct clk_core *core, - unsigned long rate, - unsigned long min_rate, - unsigned long max_rate) +static int clk_core_round_rate_nolock(struct clk_core *core, + struct clk_rate_request *req) { - unsigned long parent_rate = 0; struct clk_core *parent; - struct clk_hw *parent_hw; + long rate; lockdep_assert_held(&prepare_lock); @@ -774,21 +789,30 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *core, return 0; parent = core->parent; - if (parent) - parent_rate = parent->rate; + if (parent) { + req->best_parent_hw = parent->hw; + req->best_parent_rate = parent->rate; + } else { + req->best_parent_hw = NULL; + req->best_parent_rate = 0; + } if (core->ops->determine_rate) { - parent_hw = parent ? parent->hw : NULL; - return core->ops->determine_rate(core->hw, rate, - min_rate, max_rate, - &parent_rate, &parent_hw); - } else if (core->ops->round_rate) - return core->ops->round_rate(core->hw, rate, &parent_rate); - else if (core->flags & CLK_SET_RATE_PARENT) - return clk_core_round_rate_nolock(core->parent, rate, min_rate, - max_rate); - else - return core->rate; + return core->ops->determine_rate(core->hw, req); + } else if (core->ops->round_rate) { + rate = core->ops->round_rate(core->hw, req->rate, + &req->best_parent_rate); + if (rate < 0) + return rate; + + req->rate = rate; + } else if (core->flags & CLK_SET_RATE_PARENT) { + return clk_core_round_rate_nolock(parent, req); + } else { + req->rate = core->rate; + } + + return 0; } /** @@ -800,38 +824,32 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *core, * * Useful for clk_ops such as .set_rate and .determine_rate. 
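[clk_hw_round_rate(), introduced a little further down, is the provider-side replacement for __clk_round_rate(): it builds a clk_rate_request from the clock's registered boundaries, runs the same nolock rounding, and returns 0 on failure rather than an error code. The clk-stm32f4.c hunk earlier in this patch uses exactly this pairing; a condensed sketch with a hypothetical fixed x2 multiplier:

static long foo_mul2_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
		*prate = clk_hw_round_rate(clk_hw_get_parent(hw), rate / 2);

	return *prate * 2;
}
]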
*/ -unsigned long __clk_determine_rate(struct clk_hw *hw, - unsigned long rate, - unsigned long min_rate, - unsigned long max_rate) +int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { - if (!hw) + if (!hw) { + req->rate = 0; return 0; + } - return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate); + return clk_core_round_rate_nolock(hw->core, req); } EXPORT_SYMBOL_GPL(__clk_determine_rate); -/** - * __clk_round_rate - round the given rate for a clk - * @clk: round the rate of this clock - * @rate: the rate which is to be rounded - * - * Useful for clk_ops such as .set_rate - */ -unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) +unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) { - unsigned long min_rate; - unsigned long max_rate; + int ret; + struct clk_rate_request req; - if (!clk) - return 0; + clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); + req.rate = rate; - clk_core_get_boundaries(clk->core, &min_rate, &max_rate); + ret = clk_core_round_rate_nolock(hw->core, &req); + if (ret) + return 0; - return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate); + return req.rate; } -EXPORT_SYMBOL_GPL(__clk_round_rate); +EXPORT_SYMBOL_GPL(clk_hw_round_rate); /** * clk_round_rate - round the given rate for a clk @@ -844,16 +862,24 @@ EXPORT_SYMBOL_GPL(__clk_round_rate); */ long clk_round_rate(struct clk *clk, unsigned long rate) { - unsigned long ret; + struct clk_rate_request req; + int ret; if (!clk) return 0; clk_prepare_lock(); - ret = __clk_round_rate(clk, rate); + + clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); + req.rate = rate; + + ret = clk_core_round_rate_nolock(clk->core, &req); clk_prepare_unlock(); - return ret; + if (ret) + return ret; + + return req.rate; } EXPORT_SYMBOL_GPL(clk_round_rate); @@ -1064,18 +1090,40 @@ static int clk_fetch_parent_index(struct clk_core *core, return -EINVAL; } +/* + * Update the orphan status of @core and all its children. 
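[The new per-core orphan flag caches whether a clock currently hangs off clk_orphan_list through any ancestor. Because clk_reparent() moves whole subtrees, the flag has to be swept recursively, which is what the helper introduced here does. A hypothetical registration order showing why a one-node update would not suffice; foo_pll_hw/foo_div_hw/foo_gate_hw are made-up clk_hw instances whose init data chains pll <- div <- gate by parent name:

extern struct clk_hw foo_pll_hw, foo_div_hw, foo_gate_hw;

static void foo_register_tree(struct device *dev)
{
	clk_register(dev, &foo_div_hw);  /* parent "foo_pll" absent: orphan */
	clk_register(dev, &foo_gate_hw); /* child of "foo_div": inherits orphan */
	clk_register(dev, &foo_pll_hw);  /* __clk_init() reparents "foo_div" and
					  * must clear ->orphan on "foo_gate" too */
}
]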
+ */ +static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) +{ + struct clk_core *child; + + core->orphan = is_orphan; + + hlist_for_each_entry(child, &core->children, child_node) + clk_core_update_orphan_status(child, is_orphan); +} + static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) { + bool was_orphan = core->orphan; + hlist_del(&core->child_node); if (new_parent) { + bool becomes_orphan = new_parent->orphan; + /* avoid duplicate POST_RATE_CHANGE notifications */ if (new_parent->new_child == core) new_parent->new_child = NULL; hlist_add_head(&core->child_node, &new_parent->children); + + if (was_orphan != becomes_orphan) + clk_core_update_orphan_status(core, becomes_orphan); } else { hlist_add_head(&core->child_node, &clk_orphan_list); + if (!was_orphan) + clk_core_update_orphan_status(core, true); } core->parent = new_parent; @@ -1160,14 +1208,8 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, flags = clk_enable_lock(); clk_reparent(core, old_parent); clk_enable_unlock(flags); + __clk_set_parent_after(core, old_parent, parent); - if (core->prepare_count) { - flags = clk_enable_lock(); - clk_core_disable(core); - clk_core_disable(parent); - clk_enable_unlock(flags); - clk_core_unprepare(parent); - } return ret; } @@ -1249,7 +1291,6 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, { struct clk_core *top = core; struct clk_core *old_parent, *parent; - struct clk_hw *parent_hw; unsigned long best_parent_rate = 0; unsigned long new_rate; unsigned long min_rate; @@ -1270,20 +1311,29 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, /* find the closest rate and parent clk/rate */ if (core->ops->determine_rate) { - parent_hw = parent ? parent->hw : NULL; - ret = core->ops->determine_rate(core->hw, rate, - min_rate, - max_rate, - &best_parent_rate, - &parent_hw); + struct clk_rate_request req; + + req.rate = rate; + req.min_rate = min_rate; + req.max_rate = max_rate; + if (parent) { + req.best_parent_hw = parent->hw; + req.best_parent_rate = parent->rate; + } else { + req.best_parent_hw = NULL; + req.best_parent_rate = 0; + } + + ret = core->ops->determine_rate(core->hw, &req); if (ret < 0) return NULL; - new_rate = ret; - parent = parent_hw ? parent_hw->core : NULL; + best_parent_rate = req.best_parent_rate; + new_rate = req.rate; + parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; } else if (core->ops->round_rate) { ret = core->ops->round_rate(core->hw, rate, - &best_parent_rate); + &best_parent_rate); if (ret < 0) return NULL; @@ -1592,8 +1642,12 @@ struct clk *clk_get_parent(struct clk *clk) { struct clk *parent; + if (!clk) + return NULL; + clk_prepare_lock(); - parent = __clk_get_parent(clk); + /* TODO: Create a per-user clk and change callers to call clk_put */ + parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; clk_prepare_unlock(); return parent; @@ -2324,13 +2378,17 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * clocks and re-parent any that are children of the clock currently * being clk_init'd. */ - if (core->parent) + if (core->parent) { hlist_add_head(&core->child_node, &core->parent->children); - else if (core->flags & CLK_IS_ROOT) + core->orphan = core->parent->orphan; + } else if (core->flags & CLK_IS_ROOT) { hlist_add_head(&core->child_node, &clk_root_list); - else + core->orphan = false; + } else { hlist_add_head(&core->child_node, &clk_orphan_list); + core->orphan = true; + } /* * Set clk's accuracy. 
The preferred method is to use @@ -2379,7 +2437,8 @@ static int __clk_init(struct device *dev, struct clk *clk_user) hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { if (orphan->num_parents && orphan->ops->get_parent) { i = orphan->ops->get_parent(orphan->hw); - if (!strcmp(core->name, orphan->parent_names[i])) + if (i >= 0 && i < orphan->num_parents && + !strcmp(core->name, orphan->parent_names[i])) clk_core_reparent(orphan, core); continue; } @@ -2479,6 +2538,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) core->hw = hw; core->flags = hw->init->flags; core->num_parents = hw->init->num_parents; + core->min_rate = 0; + core->max_rate = ULONG_MAX; hw->core = core; /* allocate local copy in case parent_names is __initdata */ @@ -3054,8 +3115,6 @@ struct clock_provider { struct list_head node; }; -static LIST_HEAD(clk_provider_list); - /* * This function looks for a parent clock. If there is one, then it * checks that the provider for this parent clock was initialized, in @@ -3106,14 +3165,24 @@ void __init of_clk_init(const struct of_device_id *matches) struct clock_provider *clk_provider, *next; bool is_init_done; bool force = false; + LIST_HEAD(clk_provider_list); if (!matches) matches = &__clk_of_table; /* First prepare the list of the clocks providers */ for_each_matching_node_and_match(np, matches, &match) { - struct clock_provider *parent = - kzalloc(sizeof(struct clock_provider), GFP_KERNEL); + struct clock_provider *parent; + + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) { + list_for_each_entry_safe(clk_provider, next, + &clk_provider_list, node) { + list_del(&clk_provider->node); + kfree(clk_provider); + } + return; + } parent->clk_init_cb = match->data; parent->np = np; diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index c0eaf0973..779b6ff0c 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name, if (IS_ERR(r)) return PTR_ERR(r); - l = clkdev_create(r, alias, "%s", alias_dev_name); + l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL, + alias_dev_name); clk_put(r); return l ? 
0 : -ENODEV; diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c index 56f9eba91..1dd5d14d5 100644 --- a/drivers/clk/h8300/clk-div.c +++ b/drivers/clk/h8300/clk-div.c @@ -4,8 +4,6 @@ * Copyright 2015 Yoshinori Sato */ -#include -#include #include #include #include @@ -15,7 +13,7 @@ static DEFINE_SPINLOCK(clklock); static void __init h8300_div_clk_setup(struct device_node *node) { - unsigned int num_parents; + int num_parents; struct clk *clk; const char *clk_name = node->name; const char *parent_name; diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c index 4701b093e..6cf38dc1c 100644 --- a/drivers/clk/h8300/clk-h8s2678.c +++ b/drivers/clk/h8300/clk-h8s2678.c @@ -4,12 +4,11 @@ * Copyright 2015 Yoshinori Sato */ -#include -#include #include #include #include #include +#include static DEFINE_SPINLOCK(clklock); @@ -28,7 +27,7 @@ static unsigned long pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct pll_clock *pll_clock = to_pll_clock(hw); - int mul = 1 << (ctrl_inb((unsigned long)pll_clock->pllcr) & 3); + int mul = 1 << (readb(pll_clock->pllcr) & 3); return parent_rate * mul; } @@ -65,13 +64,13 @@ static int pll_set_rate(struct clk_hw *hw, unsigned long rate, pll = ((rate / parent_rate) / 2) & 0x03; spin_lock_irqsave(&clklock, flags); - val = ctrl_inb((unsigned long)pll_clock->sckcr); + val = readb(pll_clock->sckcr); val |= 0x08; - ctrl_outb(val, (unsigned long)pll_clock->sckcr); - val = ctrl_inb((unsigned long)pll_clock->pllcr); + writeb(val, pll_clock->sckcr); + val = readb(pll_clock->pllcr); val &= ~0x03; val |= pll; - ctrl_outb(val, (unsigned long)pll_clock->pllcr); + writeb(val, pll_clock->pllcr); spin_unlock_irqrestore(&clklock, flags); return 0; } @@ -84,7 +83,7 @@ static const struct clk_ops pll_ops = { static void __init h8s2678_pll_clk_setup(struct device_node *node) { - unsigned int num_parents; + int num_parents; struct clk *clk; const char *clk_name = node->name; const char *parent_name; @@ -98,11 +97,9 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node) } - pll_clock = kzalloc(sizeof(struct pll_clock), GFP_KERNEL); - if (!pll_clock) { - pr_err("%s: failed to alloc memory", clk_name); + pll_clock = kzalloc(sizeof(*pll_clock), GFP_KERNEL); + if (!pll_clock) return; - } pll_clock->sckcr = of_iomap(node, 0); if (pll_clock->sckcr == NULL) { diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index b4165ba75..e43485448 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig @@ -4,3 +4,9 @@ config COMMON_CLK_HI6220 default ARCH_HISI help Build the Hisilicon Hi6220 clock driver based on the common clock framework. + +config STUB_CLK_HI6220 + bool "Hi6220 Stub Clock Driver" + depends on COMMON_CLK_HI6220 && MAILBOX + help + Build the Hisilicon Hi6220 stub clock driver. 
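[The stub clock driver this option builds (clk-hi6220-stub.c, added below) never programs clock hardware itself: rate requests are written into a firmware-owned SRAM area and signalled over a mailbox channel, so consumers such as a cpufreq driver just use the ordinary clk API. A minimal, hypothetical consumer sketch, assuming the device's clock lookup resolves to the "acpu0" clock the driver registers:

static int foo_scale_acpu(struct device *dev, unsigned long target_hz)
{
	struct clk *acpu = devm_clk_get(dev, NULL);
	long rounded;

	if (IS_ERR(acpu))
		return PTR_ERR(acpu);

	/* ->round_rate clamps against the firmware limit in ACPU_DFS_FREQ_LMT */
	rounded = clk_round_rate(acpu, target_hz);
	if (rounded <= 0)
		return rounded ? : -EINVAL;

	/* ->set_rate posts the kHz value to SRAM and kicks the mailbox */
	return clk_set_rate(acpu, rounded);
}
]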
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 48f0116a0..74dba3159 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o +obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c index 715d34a5e..7d03fe17d 100644 --- a/drivers/clk/hisilicon/clk-hi3620.c +++ b/drivers/clk/hisilicon/clk-hi3620.c @@ -25,13 +25,11 @@ #include #include -#include #include #include #include #include #include -#include #include @@ -294,34 +292,29 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw, } } -static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +static int mmc_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_mmc *mclk = to_mmc(hw); - unsigned long best = 0; - if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { - rate = 13000000; - best = 26000000; - } else if (rate <= 26000000) { - rate = 25000000; - best = 180000000; - } else if (rate <= 52000000) { - rate = 50000000; - best = 360000000; - } else if (rate <= 100000000) { - rate = 100000000; - best = 720000000; + if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { + req->rate = 13000000; + req->best_parent_rate = 26000000; + } else if (req->rate <= 26000000) { + req->rate = 25000000; + req->best_parent_rate = 180000000; + } else if (req->rate <= 52000000) { + req->rate = 50000000; + req->best_parent_rate = 360000000; + } else if (req->rate <= 100000000) { + req->rate = 100000000; + req->best_parent_rate = 720000000; } else { /* max is 180M */ - rate = 180000000; - best = 1440000000; + req->rate = 180000000; + req->best_parent_rate = 1440000000; } - *best_parent_rate = best; - return rate; + return -EINVAL; } static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len) diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c new file mode 100644 index 000000000..2c4add11c --- /dev/null +++ b/drivers/clk/hisilicon/clk-hi6220-stub.c @@ -0,0 +1,276 @@ +/* + * Hi6220 stub clock driver + * + * Copyright (c) 2015 Hisilicon Limited. + * Copyright (c) 2015 Linaro Limited. + * + * Author: Leo Yan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
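[A note on the DFS handshake the new file implements, condensed from hi6220_acpu_set_freq()/hi6220_acpu_get_freq() below so the whole round trip is visible in one place. The helper name is hypothetical; the ACPU_DFS_* offsets, the regmap and the union-over-u32-words payload are the ones defined in the driver body, and the sketch assumes the blocking mailbox send (tx_block = true) has completed before the current frequency is read back:

static unsigned int foo_dfs_request_khz(struct hi6220_stub_clk *stub,
					unsigned int khz)
{
	union hi6220_mbox_data data = { };
	unsigned int cur;

	/* post the request word where firmware looks for it */
	regmap_write(stub->dfs_map, ACPU_DFS_FREQ_REQ, khz);

	/* same doorbell message hi6220_acpu_set_freq() composes */
	data.msg.type = HI6220_MBOX_FREQ;
	data.msg.cmd = HI6220_MBOX_CMD_SET;
	data.msg.obj = HI6220_MBOX_OBJ_AP;
	data.msg.src = HI6220_MBOX_OBJ_AP;
	mbox_send_message(stub->mbox, &data);

	/* firmware reports the frequency it actually programmed */
	regmap_read(stub->dfs_map, ACPU_DFS_CUR_FREQ, &cur);
	return cur;
}
]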
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Stub clocks id */ +#define HI6220_STUB_ACPU0 0 +#define HI6220_STUB_ACPU1 1 +#define HI6220_STUB_GPU 2 +#define HI6220_STUB_DDR 5 + +/* Mailbox message */ +#define HI6220_MBOX_MSG_LEN 8 + +#define HI6220_MBOX_FREQ 0xA +#define HI6220_MBOX_CMD_SET 0x3 +#define HI6220_MBOX_OBJ_AP 0x0 + +/* CPU dynamic frequency scaling */ +#define ACPU_DFS_FREQ_MAX 0x1724 +#define ACPU_DFS_CUR_FREQ 0x17CC +#define ACPU_DFS_FLAG 0x1B30 +#define ACPU_DFS_FREQ_REQ 0x1B34 +#define ACPU_DFS_FREQ_LMT 0x1B38 +#define ACPU_DFS_LOCK_FLAG 0xAEAEAEAE + +#define to_stub_clk(hw) container_of(hw, struct hi6220_stub_clk, hw) + +struct hi6220_stub_clk { + u32 id; + + struct device *dev; + struct clk_hw hw; + + struct regmap *dfs_map; + struct mbox_client cl; + struct mbox_chan *mbox; +}; + +struct hi6220_mbox_msg { + unsigned char type; + unsigned char cmd; + unsigned char obj; + unsigned char src; + unsigned char para[4]; +}; + +union hi6220_mbox_data { + unsigned int data[HI6220_MBOX_MSG_LEN]; + struct hi6220_mbox_msg msg; +}; + +static unsigned int hi6220_acpu_get_freq(struct hi6220_stub_clk *stub_clk) +{ + unsigned int freq; + + regmap_read(stub_clk->dfs_map, ACPU_DFS_CUR_FREQ, &freq); + return freq; +} + +static int hi6220_acpu_set_freq(struct hi6220_stub_clk *stub_clk, + unsigned int freq) +{ + union hi6220_mbox_data data; + + /* set the frequency in sram */ + regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, freq); + + /* compound mailbox message */ + data.msg.type = HI6220_MBOX_FREQ; + data.msg.cmd = HI6220_MBOX_CMD_SET; + data.msg.obj = HI6220_MBOX_OBJ_AP; + data.msg.src = HI6220_MBOX_OBJ_AP; + + mbox_send_message(stub_clk->mbox, &data); + return 0; +} + +static int hi6220_acpu_round_freq(struct hi6220_stub_clk *stub_clk, + unsigned int freq) +{ + unsigned int limit_flag, limit_freq = UINT_MAX; + unsigned int max_freq; + + /* check the constrained frequency */ + regmap_read(stub_clk->dfs_map, ACPU_DFS_FLAG, &limit_flag); + if (limit_flag == ACPU_DFS_LOCK_FLAG) + regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, &limit_freq); + + /* check the supported maximum frequency */ + regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_MAX, &max_freq); + + /* calculate the real maximum frequency */ + max_freq = min(max_freq, limit_freq); + + if (WARN_ON(freq > max_freq)) + freq = max_freq; + + return freq; +} + +static unsigned long hi6220_stub_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + u32 rate = 0; + struct hi6220_stub_clk *stub_clk = to_stub_clk(hw); + + switch (stub_clk->id) { + case HI6220_STUB_ACPU0: + rate = hi6220_acpu_get_freq(stub_clk); + + /* convert from kHz to Hz */ + rate *= 1000; + break; + + default: + dev_err(stub_clk->dev, "%s: un-supported clock id %d\n", + __func__, stub_clk->id); + break; + } + + return rate; +} + +static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hi6220_stub_clk *stub_clk = to_stub_clk(hw); + unsigned long new_rate = rate / 1000; /* kHz */ + int ret = 0; + + switch (stub_clk->id) { + case HI6220_STUB_ACPU0: + ret = hi6220_acpu_set_freq(stub_clk, new_rate); + if (ret < 0) + return ret; + + break; + + default: + dev_err(stub_clk->dev, "%s: un-supported clock id %d\n", + __func__, stub_clk->id); + break; + } + + pr_debug("%s: set rate=%ldkHz\n", __func__, new_rate); + return ret; +} + +static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct 
hi6220_stub_clk *stub_clk = to_stub_clk(hw); + unsigned long new_rate = rate / 1000; /* kHz */ + + switch (stub_clk->id) { + case HI6220_STUB_ACPU0: + new_rate = hi6220_acpu_round_freq(stub_clk, new_rate); + + /* convert from kHz to Hz */ + new_rate *= 1000; + break; + + default: + dev_err(stub_clk->dev, "%s: un-supported clock id %d\n", + __func__, stub_clk->id); + break; + } + + return new_rate; +} + +static const struct clk_ops hi6220_stub_clk_ops = { + .recalc_rate = hi6220_stub_clk_recalc_rate, + .round_rate = hi6220_stub_clk_round_rate, + .set_rate = hi6220_stub_clk_set_rate, +}; + +static int hi6220_stub_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk_init_data init; + struct hi6220_stub_clk *stub_clk; + struct clk *clk; + struct device_node *np = pdev->dev.of_node; + int ret; + + stub_clk = devm_kzalloc(dev, sizeof(*stub_clk), GFP_KERNEL); + if (!stub_clk) + return -ENOMEM; + + stub_clk->dfs_map = syscon_regmap_lookup_by_phandle(np, + "hisilicon,hi6220-clk-sram"); + if (IS_ERR(stub_clk->dfs_map)) { + dev_err(dev, "failed to get sram regmap\n"); + return PTR_ERR(stub_clk->dfs_map); + } + + stub_clk->hw.init = &init; + stub_clk->dev = dev; + stub_clk->id = HI6220_STUB_ACPU0; + + /* Use mailbox client with blocking mode */ + stub_clk->cl.dev = dev; + stub_clk->cl.tx_done = NULL; + stub_clk->cl.tx_block = true; + stub_clk->cl.tx_tout = 500; + stub_clk->cl.knows_txdone = false; + + /* Allocate mailbox channel */ + stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0); + if (IS_ERR(stub_clk->mbox)) { + dev_err(dev, "failed get mailbox channel\n"); + return PTR_ERR(stub_clk->mbox); + }; + + init.name = "acpu0"; + init.ops = &hi6220_stub_clk_ops; + init.num_parents = 0; + init.flags = CLK_IS_ROOT; + + clk = devm_clk_register(dev, &stub_clk->hw); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = of_clk_add_provider(np, of_clk_src_simple_get, clk); + if (ret) { + dev_err(dev, "failed to register OF clock provider\n"); + return ret; + } + + /* initialize buffer to zero */ + regmap_write(stub_clk->dfs_map, ACPU_DFS_FLAG, 0x0); + regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, 0x0); + regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, 0x0); + + dev_dbg(dev, "Registered clock '%s'\n", init.name); + return 0; +} + +static const struct of_device_id hi6220_stub_clk_of_match[] = { + { .compatible = "hisilicon,hi6220-stub-clk", }, + {} +}; + +static struct platform_driver hi6220_stub_clk_driver = { + .driver = { + .name = "hi6220-stub-clk", + .of_match_table = hi6220_stub_clk_of_match, + }, + .probe = hi6220_stub_clk_probe, +}; + +static int __init hi6220_stub_clk_init(void) +{ + return platform_driver_register(&hi6220_stub_clk_driver); +} +subsys_initcall(hi6220_stub_clk_init); diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c index 132b57a0c..8ca967308 100644 --- a/drivers/clk/hisilicon/clk-hip04.c +++ b/drivers/clk/hisilicon/clk-hip04.c @@ -24,13 +24,11 @@ #include #include -#include #include #include #include #include #include -#include #include diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c index c90a89739..9f8e76676 100644 --- a/drivers/clk/hisilicon/clk.c +++ b/drivers/clk/hisilicon/clk.c @@ -24,15 +24,14 @@ */ #include -#include #include +#include #include #include #include #include #include #include -#include #include "clk.h" @@ -45,14 +44,9 @@ struct hisi_clock_data __init *hisi_clk_init(struct device_node *np, struct clk **clk_table; void __iomem *base; - if (np) { - base = 
of_iomap(np, 0); - if (!base) { - pr_err("failed to map Hisilicon clock registers\n"); - goto err; - } - } else { - pr_err("failed to find Hisilicon clock node in DTS\n"); + base = of_iomap(np, 0); + if (!base) { + pr_err("%s: failed to map clock registers\n", __func__); goto err; } diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c index b03d5a724..a47812f56 100644 --- a/drivers/clk/hisilicon/clkgate-separated.c +++ b/drivers/clk/hisilicon/clkgate-separated.c @@ -25,10 +25,8 @@ #include #include -#include #include #include -#include #include "clk.h" diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile index 75fae169c..1ada68abb 100644 --- a/drivers/clk/imx/Makefile +++ b/drivers/clk/imx/Makefile @@ -22,5 +22,6 @@ obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o +obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o obj-$(CONFIG_SOC_VF610) += clk-vf610.o diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c index c2647fa19..99cf802fa 100644 --- a/drivers/clk/imx/clk-imx1.c +++ b/drivers/clk/imx/clk-imx1.c @@ -15,7 +15,6 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ -#include #include #include #include diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c index dba987e3b..e63188eb0 100644 --- a/drivers/clk/imx/clk-imx21.c +++ b/drivers/clk/imx/clk-imx21.c @@ -9,7 +9,6 @@ * of the License, or (at your option) any later version. */ -#include #include #include #include diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c index fe66c40b7..1f8383475 100644 --- a/drivers/clk/imx/clk-imx31.c +++ b/drivers/clk/imx/clk-imx31.c @@ -147,7 +147,8 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2"); clk_register_clkdev(clk[pwm_gate], "pwm", NULL); clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); - clk_register_clkdev(clk[rtc_gate], NULL, "imx21-rtc"); + clk_register_clkdev(clk[ckil], "ref", "imx21-rtc"); + clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc"); clk_register_clkdev(clk[epit1_gate], "epit", NULL); clk_register_clkdev(clk[epit2_gate], "epit", NULL); clk_register_clkdev(clk[nfc], NULL, "imx27-nand.0"); diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c index 69138ba3d..8623cd4e4 100644 --- a/drivers/clk/imx/clk-imx35.c +++ b/drivers/clk/imx/clk-imx35.c @@ -66,7 +66,7 @@ static const char *std_sel[] = {"ppll", "arm"}; static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"}; enum mx35_clks { - ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg, + ckih, ckil, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg, arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel, esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre, spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre, @@ -107,6 +107,7 @@ int __init mx35_clocks_init(void) } clk[ckih] = imx_clk_fixed("ckih", 24000000); + clk[ckil] = imx_clk_fixed("ckih", 32768); clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL); clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL); @@ -258,6 +259,9 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1"); clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2"); clk_register_clkdev(clk[ipg], "ipg", 
"imx21-uart.2"); + /* i.mx35 has the i.mx21 type rtc */ + clk_register_clkdev(clk[ckil], "ref", "imx21-rtc"); + clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc"); clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0"); diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index d046f8e43..b2c1c047d 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc); clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); + clk[IMX6QDL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); + clk[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); + clk[IMX6QDL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); @@ -494,6 +497,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); } + clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000); + if (clk_on_imx6dl()) + clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]); + clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c new file mode 100644 index 000000000..aaa366506 --- /dev/null +++ b/drivers/clk/imx/clk-imx6ul.c @@ -0,0 +1,432 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk.h" + +#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16) +#define CCDR 0x4 + +static const char *pll_bypass_src_sels[] = { "osc", "dummy", }; +static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", }; +static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", }; +static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", }; +static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", }; +static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; +static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; +static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; +static const char *ca7_secondary_sels[] = { "pll2_pfd2_396m", "pll2_bus", }; +static const char *step_sels[] = { "osc", "ca7_secondary_sel", }; +static const char *pll1_sw_sels[] = { "pll1_sys", "step", }; +static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", }; +static const char *axi_sels[] = {"periph", "axi_alt_sel", }; +static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", }; +static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", }; +static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", }; +static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", }; +static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; +static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; +static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; +static const char *bch_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; +static const char *gpmi_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; +static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd0_720m", }; +static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", }; +static const char *sai_sels[] = { "pll3_pfd2_508m", "pll5_video_div", "pll4_audio_div", }; +static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", }; +static const char *sim_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", }; +static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", }; +static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", }; +static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", }; +static const char *qspi1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", }; +static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", "dummy", "dummy", }; +static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", "dummy", }; +static const char *ecspi_sels[] = { "pll3_60m", "osc", }; +static const char *uart_sels[] = { "pll3_80m", "osc", }; +static const char *perclk_sels[] = { "ipg", "osc", }; +static const char *lcdif_sels[] = { "lcdif_podf", 
"ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", }; +static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; +static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", }; + +static struct clk *clks[IMX6UL_CLK_END]; +static struct clk_onecell_data clk_data; + +static int const clks_init_on[] __initconst = { + IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2, IMX6UL_CLK_AIPSTZ3, + IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM, + IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG, +}; + +static struct clk_div_table clk_enet_ref_table[] = { + { .val = 0, .div = 20, }, + { .val = 1, .div = 10, }, + { .val = 2, .div = 5, }, + { .val = 3, .div = 4, }, + { } +}; + +static struct clk_div_table post_div_table[] = { + { .val = 2, .div = 1, }, + { .val = 1, .div = 2, }, + { .val = 0, .div = 4, }, + { } +}; + +static struct clk_div_table video_div_table[] = { + { .val = 0, .div = 1, }, + { .val = 1, .div = 2, }, + { .val = 2, .div = 1, }, + { .val = 3, .div = 4, }, + { } +}; + +static u32 share_count_asrc; +static u32 share_count_audio; +static u32 share_count_sai1; +static u32 share_count_sai2; +static u32 share_count_sai3; + +static void __init imx6ul_clocks_init(struct device_node *ccm_node) +{ + struct device_node *np; + void __iomem *base; + int i; + + clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); + + clks[IMX6UL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); + clks[IMX6UL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); + + /* ipp_di clock is external input */ + clks[IMX6UL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0"); + clks[IMX6UL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1"); + + np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop"); + base = of_iomap(np, 0); + WARN_ON(!base); + + clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + + clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); + clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); + clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); + clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); + clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); + clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); + clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + + clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 
0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + clks[IMX6UL_CLK_CSI_SEL] = imx_clk_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT); + + /* Do not bypass PLLs initially */ + clk_set_parent(clks[IMX6UL_PLL1_BYPASS], clks[IMX6UL_CLK_PLL1]); + clk_set_parent(clks[IMX6UL_PLL2_BYPASS], clks[IMX6UL_CLK_PLL2]); + clk_set_parent(clks[IMX6UL_PLL3_BYPASS], clks[IMX6UL_CLK_PLL3]); + clk_set_parent(clks[IMX6UL_PLL4_BYPASS], clks[IMX6UL_CLK_PLL4]); + clk_set_parent(clks[IMX6UL_PLL5_BYPASS], clks[IMX6UL_CLK_PLL5]); + clk_set_parent(clks[IMX6UL_PLL6_BYPASS], clks[IMX6UL_CLK_PLL6]); + clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]); + + clks[IMX6UL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1); + clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + clks[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + clks[IMX6UL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + clks[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + + /* + * Bit 20 is the reserved and read-only bit, we do this only for: + * - Do nothing for usbphy clk_enable/disable + * - Keep refcount when do usbphy clk_enable/disable, in that case, + * the clk framework many need to enable/disable usbphy's parent + */ + clks[IMX6UL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + clks[IMX6UL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + + /* + * usbphy*_gate needs to be on after system boots up, and software + * never needs to control it anymore. 
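[The two "gates" on the reserved, read-only bit 20 deserve a note: the register write in the gate ops is a hardware no-op, but registering them as clocks keeps consumer-side refcounting intact, so enabling a PHY clock still prepares and enables pll3_usb_otg or pll7_usb_host up the tree. A hypothetical consumer sketch:

static int foo_usbphy_clk_on(struct device *dev)
{
	struct clk *phy_clk = devm_clk_get(dev, "phy"); /* maps to "usbphy1" */

	if (IS_ERR(phy_clk))
		return PTR_ERR(phy_clk);

	/* no register effect on the gate itself; the parent PLL is what
	 * actually gets enabled via the clock tree */
	return clk_prepare_enable(phy_clk);
}
]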
+ */ + clks[IMX6UL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); + clks[IMX6UL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); + + /* name parent_name reg idx */ + clks[IMX6UL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); + clks[IMX6UL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); + clks[IMX6UL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); + clks[IMX6UL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); + clks[IMX6UL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); + clks[IMX6UL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); + clks[IMX6UL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); + clks[IMX6UL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); + + clks[IMX6UL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, + base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); + clks[IMX6UL_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, + base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock); + + clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20); + clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); + clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21); + + clks[IMX6UL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); + clks[IMX6UL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock); + clks[IMX6UL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); + clks[IMX6UL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", + CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); + + /* name parent_name mult div */ + clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); + clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + clks[IMX6UL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); + + np = ccm_node; + base = of_iomap(np, 0); + WARN_ON(!base); + + clks[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels)); + clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels)); + clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0); + clks[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels)); + clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0); + clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); + clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 
21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); + clks[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); + clks[IMX6UL_CLK_GPMI_SEL] = imx_clk_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels)); + clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels)); + clks[IMX6UL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + clks[IMX6UL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels)); + clks[IMX6UL_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels)); + clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels)); + clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); + clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); + clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); + clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels)); + clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels)); + clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); + clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels)); + clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels)); + clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels)); + clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); + + clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); + clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); + + clks[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + clks[IMX6UL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); + clks[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7); + clks[IMX6UL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "qspi1_sel", 1, 7); + + clks[IMX6UL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + + clks[IMX6UL_CLK_PERIPH_CLK2] = 
imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); + clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); + clks[IMX6UL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); + clks[IMX6UL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3); + clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); + clks[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); + clks[IMX6UL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); + clks[IMX6UL_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6); + clks[IMX6UL_CLK_GPMI_PODF] = imx_clk_divider("gpmi_podf", "gpmi_sel", base + 0x24, 22, 3); + clks[IMX6UL_CLK_BCH_PODF] = imx_clk_divider("bch_podf", "bch_sel", base + 0x24, 19, 3); + clks[IMX6UL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + clks[IMX6UL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); + clks[IMX6UL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); + clks[IMX6UL_CLK_SAI3_PRED] = imx_clk_divider("sai3_pred", "sai3_sel", base + 0x28, 22, 3); + clks[IMX6UL_CLK_SAI3_PODF] = imx_clk_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6); + clks[IMX6UL_CLK_SAI1_PRED] = imx_clk_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3); + clks[IMX6UL_CLK_SAI1_PODF] = imx_clk_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6); + clks[IMX6UL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); + clks[IMX6UL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); + clks[IMX6UL_CLK_SAI2_PRED] = imx_clk_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3); + clks[IMX6UL_CLK_SAI2_PODF] = imx_clk_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6); + clks[IMX6UL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); + clks[IMX6UL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3); + clks[IMX6UL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); + clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3); + clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); + + clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + clks[IMX6UL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); + clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + + /* CCGR0 */ + clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0); + clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2); + clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4); + clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); + clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); + clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); + clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", 
"ahb", base + 0x68, 10); + clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); + clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); + clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16); + clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); + clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20); + clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24); + clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x68, 26); + clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28); + clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28); + clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30); + + /* CCGR1 */ + clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); + clks[IMX6UL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); + clks[IMX6UL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); + clks[IMX6UL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); + clks[IMX6UL_CLK_ADC2] = imx_clk_gate2("adc2", "ipg", base + 0x6c, 8); + clks[IMX6UL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10); + clks[IMX6UL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10); + clks[IMX6UL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); + clks[IMX6UL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); + clks[IMX6UL_CLK_ADC1] = imx_clk_gate2("adc1", "ipg", base + 0x6c, 16); + clks[IMX6UL_CLK_GPT1_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20); + clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22); + clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24); + clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24); + + /* CCGR2 */ + clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2); + clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); + clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); + clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); + clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); + clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14); + clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28); + clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30); + + /* CCGR3 */ + clks[IMX6UL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2); + clks[IMX6UL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2); + clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4); + clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4); + clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6); + clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6); + clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10); + clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14); + clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16); + clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20); + 
clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24); + clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28); + + /* CCGR4 */ + clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12); + clks[IMX6UL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); + clks[IMX6UL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); + clks[IMX6UL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); + clks[IMX6UL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); + clks[IMX6UL_CLK_GPMI_BCH_APB] = imx_clk_gate2("gpmi_bch_apb", "bch_podf", base + 0x78, 24); + clks[IMX6UL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "gpmi_podf", base + 0x78, 26); + clks[IMX6UL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc_podf", base + 0x78, 28); + clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30); + + /* CCGR5 */ + clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); + clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); + clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10); + clks[IMX6UL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); + clks[IMX6UL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); + clks[IMX6UL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); + clks[IMX6UL_CLK_SAI3] = imx_clk_gate2_shared("sai3", "sai3_podf", base + 0x7c, 22, &share_count_sai3); + clks[IMX6UL_CLK_SAI3_IPG] = imx_clk_gate2_shared("sai3_ipg", "ipg", base + 0x7c, 22, &share_count_sai3); + clks[IMX6UL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24); + clks[IMX6UL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24); + clks[IMX6UL_CLK_UART7_IPG] = imx_clk_gate2("uart7_ipg", "ipg", base + 0x7c, 26); + clks[IMX6UL_CLK_UART7_SERIAL] = imx_clk_gate2("uart7_serial", "uart_podf", base + 0x7c, 26); + clks[IMX6UL_CLK_SAI1] = imx_clk_gate2_shared("sai1", "sai1_podf", base + 0x7c, 28, &share_count_sai1); + clks[IMX6UL_CLK_SAI1_IPG] = imx_clk_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1); + clks[IMX6UL_CLK_SAI2] = imx_clk_gate2_shared("sai2", "sai2_podf", base + 0x7c, 30, &share_count_sai2); + clks[IMX6UL_CLK_SAI2_IPG] = imx_clk_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2); + + /* CCGR6 */ + clks[IMX6UL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); + clks[IMX6UL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + clks[IMX6UL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6); + clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8); + clks[IMX6UL_CLK_EIM] = imx_clk_gate2("eim", "eim_slow_podf", base + 0x80, 10); + clks[IMX6UL_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16); + clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14); + clks[IMX6UL_CLK_UART8_SERIAL] = imx_clk_gate2("uart8_serial", "uart_podf", base + 0x80, 14); + clks[IMX6UL_CLK_WDOG3] = imx_clk_gate2("wdog3", "ipg", base + 0x80, 20); + clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24); + clks[IMX6UL_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26); + clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28); + clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 
0x80, 30); + + /* mask handshake of mmdc */ + writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); + + for (i = 0; i < ARRAY_SIZE(clks); i++) + if (IS_ERR(clks[i])) + pr_err("i.MX6UL clk %d: register failed with %ld\n", i, PTR_ERR(clks[i])); + + clk_data.clks = clks; + clk_data.clk_num = ARRAY_SIZE(clks); + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + + /* set perclk to source from OSC */ + clk_set_parent(clks[IMX6UL_CLK_PERCLK_SEL], clks[IMX6UL_CLK_OSC]); + + clk_set_rate(clks[IMX6UL_CLK_ENET_REF], 50000000); + clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000); + clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000); + + /* keep all the clks on just for bringup */ + for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) + clk_prepare_enable(clks[clks_init_on[i]]); + + if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { + clk_prepare_enable(clks[IMX6UL_CLK_USBPHY1_GATE]); + clk_prepare_enable(clks[IMX6UL_CLK_USBPHY2_GATE]); + } + + clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]); + clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); + + clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]); +} + +CLK_OF_DECLARE(imx6ul, "fsl,imx6ul-ccm", imx6ul_clocks_init); diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c index 0b0f6f66e..04a3e78ea 100644 --- a/drivers/clk/imx/clk-pfd.c +++ b/drivers/clk/imx/clk-pfd.c @@ -10,7 +10,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c index c34ad8a61..8564e4342 100644 --- a/drivers/clk/imx/clk-pllv1.c +++ b/drivers/clk/imx/clk-pllv1.c @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index f0d15fb9d..6addf8f58 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -10,7 +10,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index b936cdd1a..7cfb7b2a2 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -16,6 +16,7 @@ */ #include +#include #include #include #include diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c index 86f1e362e..aed5af238 100644 --- a/drivers/clk/keystone/gate.c +++ b/drivers/clk/keystone/gate.c @@ -10,7 +10,6 @@ * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ -#include #include #include #include diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c index 4a375ead7..3f553d0ae 100644 --- a/drivers/clk/keystone/pll.c +++ b/drivers/clk/keystone/pll.c @@ -10,7 +10,6 @@ * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
*/ -#include #include #include #include @@ -309,8 +308,7 @@ static void __init of_pll_mux_clk_init(struct device_node *node) return; } - parents[0] = of_clk_get_parent_name(node, 0); - parents[1] = of_clk_get_parent_name(node, 1); + of_clk_parent_fill(node, parents, 2); if (!parents[0] || !parents[1]) { pr_err("%s: missing parent clocks\n", __func__); return; diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h index 6b6780b1e..11e25c992 100644 --- a/drivers/clk/mediatek/clk-gate.h +++ b/drivers/clk/mediatek/clk-gate.h @@ -16,9 +16,10 @@ #define __DRV_CLK_GATE_H #include -#include #include +struct clk; + struct mtk_clk_gate { struct clk_hw hw; struct regmap *regmap; diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c index 08b4b849b..07c21e44b 100644 --- a/drivers/clk/mediatek/clk-mt8135.c +++ b/drivers/clk/mediatek/clk-mt8135.c @@ -12,6 +12,7 @@ * GNU General Public License for more details. */ +#include #include #include #include diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c index 8b6523d15..90eff85f4 100644 --- a/drivers/clk/mediatek/clk-mt8173.c +++ b/drivers/clk/mediatek/clk-mt8173.c @@ -12,6 +12,7 @@ * GNU General Public License for more details. */ +#include #include #include #include @@ -795,8 +796,9 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init); #define CON0_MT8173_RST_BAR BIT(24) -#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, \ - _tuner_reg, _pcw_reg, _pcw_shift) { \ +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift, _div_table) { \ .id = _id, \ .name = _name, \ .reg = _reg, \ @@ -811,14 +813,31 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init); .tuner_reg = _tuner_reg, \ .pcw_reg = _pcw_reg, \ .pcw_shift = _pcw_shift, \ + .div_table = _div_table, \ } +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift) \ + PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \ + NULL) + +static const struct mtk_pll_div_table mmpll_div_table[] = { + { .div = 0, .freq = MT8173_PLL_FMAX }, + { .div = 1, .freq = 1000000000 }, + { .div = 2, .freq = 702000000 }, + { .div = 3, .freq = 253500000 }, + { .div = 4, .freq = 126750000 }, + { } /* sentinel */ +}; + static const struct mtk_pll_data plls[] = { PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0), PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0), PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0), PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0), + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table), PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0), PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0), PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0), diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index 9dda9d8ad..c5cbecb3d 
100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -17,9 +17,10 @@ #include #include -#include #include +struct clk; + #define MAX_MUX_GATE_BIT 31 #define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1) @@ -134,6 +135,11 @@ struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num); #define HAVE_RST_BAR BIT(0) +struct mtk_pll_div_table { + u32 div; + unsigned long freq; +}; + struct mtk_pll_data { int id; const char *name; @@ -150,6 +156,7 @@ struct mtk_pll_data { int pcwbits; uint32_t pcw_reg; int pcw_shift; + const struct mtk_pll_div_table *div_table; }; void __init mtk_clk_register_plls(struct device_node *node, diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index 44409e98c..622e7b6c6 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -90,20 +90,23 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, int postdiv) { - u32 con1, pd, val; + u32 con1, val; int pll_en; - /* set postdiv */ - pd = readl(pll->pd_addr); - pd &= ~(POSTDIV_MASK << pll->data->pd_shift); - pd |= (ffs(postdiv) - 1) << pll->data->pd_shift; - writel(pd, pll->pd_addr); - pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; - /* set pcw */ - val = readl(pll->pcw_addr); + /* set postdiv */ + val = readl(pll->pd_addr); + val &= ~(POSTDIV_MASK << pll->data->pd_shift); + val |= (ffs(postdiv) - 1) << pll->data->pd_shift; + + /* postdiv and pcw need to be set at the same time if on the same register */ + if (pll->pd_addr != pll->pcw_addr) { + writel(val, pll->pd_addr); + val = readl(pll->pcw_addr); + } + /* set pcw */ val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1, pll->data->pcw_shift); val |= pcw << pll->data->pcw_shift; @@ -135,16 +138,28 @@ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv, u32 freq, u32 fin) { unsigned long fmin = 1000 * MHZ; + const struct mtk_pll_div_table *div_table = pll->data->div_table; u64 _pcw; u32 val; if (freq > pll->data->fmax) freq = pll->data->fmax; - for (val = 0; val < 4; val++) { + if (div_table) { + if (freq > div_table[0].freq) + freq = div_table[0].freq; + + for (val = 0; div_table[val + 1].freq != 0; val++) { + if (freq > div_table[val + 1].freq) + break; + } *postdiv = 1 << val; - if (freq * *postdiv >= fmin) - break; + } else { + for (val = 0; val < 5; val++) { + *postdiv = 1 << val; + if ((u64)freq * *postdiv >= fmin) + break; + } } /* _pcw = freq * postdiv / fin * 2^pcwfbits */ diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c index 71ad493b9..f7c30ea54 100644 --- a/drivers/clk/meson/clk-cpu.c +++ b/drivers/clk/meson/clk-cpu.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #define MESON_CPU_CLK_CNTL1 0x00 diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c index b8c511c5e..c83ae1367 100644 --- a/drivers/clk/meson/clkc.c +++ b/drivers/clk/meson/clkc.c @@ -15,7 +15,6 @@ * this program. If not, see . 
*/ -#include #include #include #include diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c index 09d41c717..4c717db05 100644 --- a/drivers/clk/mmp/clk-apbc.c +++ b/drivers/clk/mmp/clk-apbc.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c index cdcf2d7f3..47b5542ce 100644 --- a/drivers/clk/mmp/clk-apmu.c +++ b/drivers/clk/mmp/clk-apmu.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c index adbd9d64d..d20cd3431 100644 --- a/drivers/clk/mmp/clk-gate.c +++ b/drivers/clk/mmp/clk-gate.c @@ -27,7 +27,6 @@ static int mmp_clk_gate_enable(struct clk_hw *hw) { struct mmp_clk_gate *gate = to_clk_mmp_gate(hw); - struct clk *clk = hw->clk; unsigned long flags = 0; unsigned long rate; u32 tmp; @@ -44,7 +43,7 @@ static int mmp_clk_gate_enable(struct clk_hw *hw) spin_unlock_irqrestore(gate->lock, flags); if (gate->flags & MMP_CLK_GATE_NEED_DELAY) { - rate = __clk_get_rate(clk); + rate = clk_hw_get_rate(hw); /* Need delay 2 cycles. */ udelay(2000000/rate); } diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c index de6a87317..c554833cf 100644 --- a/drivers/clk/mmp/clk-mix.c +++ b/drivers/clk/mmp/clk-mix.c @@ -63,7 +63,7 @@ static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val) static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val) { - int num_parents = __clk_get_num_parents(mix->hw.clk); + int num_parents = clk_hw_get_num_parents(&mix->hw); int i; if (mix->mux_flags & CLK_MUX_INDEX_BIT) @@ -113,15 +113,15 @@ static void _filter_clk_table(struct mmp_clk_mix *mix, { int i; struct mmp_clk_mix_clk_table *item; - struct clk *parent, *clk; + struct clk_hw *parent, *hw; unsigned long parent_rate; - clk = mix->hw.clk; + hw = &mix->hw; for (i = 0; i < table_size; i++) { item = &table[i]; - parent = clk_get_parent_by_index(clk, item->parent_index); - parent_rate = __clk_get_rate(parent); + parent = clk_hw_get_parent_by_index(hw, item->parent_index); + parent_rate = clk_hw_get_rate(parent); if (parent_rate % item->rate) { item->valid = 0; } else { @@ -181,7 +181,7 @@ static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val, if (timeout == 0) { pr_err("%s:%s cannot do frequency change\n", - __func__, __clk_get_name(mix->hw.clk)); + __func__, clk_hw_get_name(&mix->hw)); ret = -EBUSY; goto error; } @@ -201,27 +201,22 @@ error: return ret; } -static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int mmp_clk_mix_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct mmp_clk_mix *mix = to_clk_mix(hw); struct mmp_clk_mix_clk_table *item; - struct clk *parent, *parent_best, *mix_clk; + struct clk_hw *parent, *parent_best; unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best; unsigned long gap, gap_best; u32 div_val_max; unsigned int div; int i, j; - mix_clk = hw->clk; - parent = NULL; mix_rate_best = 0; parent_rate_best = 0; - gap_best = rate; + gap_best = ULONG_MAX; parent_best = NULL; if (mix->table) { @@ -229,11 +224,11 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, item = &mix->table[i]; if (item->valid == 0) continue; - parent = clk_get_parent_by_index(mix_clk, + parent = clk_hw_get_parent_by_index(hw, item->parent_index); - parent_rate = 
__clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); mix_rate = parent_rate / item->divisor; - gap = abs(mix_rate - rate); + gap = abs(mix_rate - req->rate); if (parent_best == NULL || gap < gap_best) { parent_best = parent; parent_rate_best = parent_rate; @@ -244,14 +239,14 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, } } } else { - for (i = 0; i < __clk_get_num_parents(mix_clk); i++) { - parent = clk_get_parent_by_index(mix_clk, i); - parent_rate = __clk_get_rate(parent); + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); + parent_rate = clk_hw_get_rate(parent); div_val_max = _get_maxdiv(mix); for (j = 0; j < div_val_max; j++) { div = _get_div(mix, j); mix_rate = parent_rate / div; - gap = abs(mix_rate - rate); + gap = abs(mix_rate - req->rate); if (parent_best == NULL || gap < gap_best) { parent_best = parent; parent_rate_best = parent_rate; @@ -265,10 +260,14 @@ static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, } found: - *best_parent_rate = parent_rate_best; - *best_parent_clk = __clk_get_hw(parent_best); + if (!parent_best) + return -EINVAL; + + req->best_parent_rate = parent_rate_best; + req->best_parent_hw = parent_best; + req->rate = mix_rate_best; - return mix_rate_best; + return 0; } static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw, @@ -381,20 +380,19 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate, struct mmp_clk_mix_clk_table *item; unsigned long parent_rate; unsigned int best_divisor; - struct clk *mix_clk, *parent; + struct clk_hw *parent; int i; best_divisor = best_parent_rate / rate; - mix_clk = hw->clk; if (mix->table) { for (i = 0; i < mix->table_size; i++) { item = &mix->table[i]; if (item->valid == 0) continue; - parent = clk_get_parent_by_index(mix_clk, + parent = clk_hw_get_parent_by_index(hw, item->parent_index); - parent_rate = __clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); if (parent_rate == best_parent_rate && item->divisor == best_divisor) break; @@ -407,13 +405,13 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate, else return -EINVAL; } else { - for (i = 0; i < __clk_get_num_parents(mix_clk); i++) { - parent = clk_get_parent_by_index(mix_clk, i); - parent_rate = __clk_get_rate(parent); + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); + parent_rate = clk_hw_get_rate(parent); if (parent_rate == best_parent_rate) break; } - if (i < __clk_get_num_parents(mix_clk)) + if (i < clk_hw_get_num_parents(hw)) return _set_rate(mix, _get_mux_val(mix, i), _get_div_val(mix, best_divisor), 1, 1); else @@ -468,20 +466,20 @@ struct clk *mmp_clk_register_mix(struct device *dev, memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info)); if (config->table) { table_bytes = sizeof(*config->table) * config->table_size; - mix->table = kzalloc(table_bytes, GFP_KERNEL); + mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL); if (!mix->table) { pr_err("%s:%s: could not allocate mmp mix table\n", __func__, name); kfree(mix); return ERR_PTR(-ENOMEM); } - memcpy(mix->table, config->table, table_bytes); mix->table_size = config->table_size; } if (config->mux_table) { table_bytes = sizeof(u32) * num_parents; - mix->mux_table = kzalloc(table_bytes, GFP_KERNEL); + mix->mux_table = kmemdup(config->mux_table, table_bytes, + GFP_KERNEL); if (!mix->mux_table) { pr_err("%s:%s: could not allocate mmp mix mux-table\n", __func__, name); @@ -489,7 
+487,6 @@ struct clk *mmp_clk_register_mix(struct device *dev, kfree(mix); return ERR_PTR(-ENOMEM); } - memcpy(mix->mux_table, config->mux_table, table_bytes); } mix->div_flags = config->div_flags; diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index cf038ef54..61893fe73 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c @@ -1,7 +1,6 @@ #include -#include #include -#include +#include #include #include diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 3821a8807..85da8b983 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -10,7 +10,8 @@ * warranty of any kind, whether express or implied. */ #include -#include +#include +#include #include #include #include @@ -120,7 +121,7 @@ static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate, if (!cpuclk->pmu_dfs) return -ENODEV; - cur_rate = __clk_get_rate(hwclk->clk); + cur_rate = clk_hw_get_rate(hwclk); reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET); fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) & diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c index 15b370ff3..4a22429cd 100644 --- a/drivers/clk/mvebu/common.c +++ b/drivers/clk/mvebu/common.c @@ -13,8 +13,8 @@ */ #include +#include #include -#include #include #include #include diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c index 90e1da938..049ee27d5 100644 --- a/drivers/clk/mxs/clk-div.c +++ b/drivers/clk/mxs/clk-div.c @@ -9,7 +9,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c index e6aa6b567..73f024056 100644 --- a/drivers/clk/mxs/clk-frac.c +++ b/drivers/clk/mxs/clk-frac.c @@ -9,7 +9,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c index 32216f9b7..f01876af6 100644 --- a/drivers/clk/mxs/clk-imx23.c +++ b/drivers/clk/mxs/clk-imx23.c @@ -9,9 +9,8 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include -#include +#include #include #include #include diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c index a68670868..6b572b759 100644 --- a/drivers/clk/mxs/clk-imx28.c +++ b/drivers/clk/mxs/clk-imx28.c @@ -9,9 +9,9 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include +#include #include #include #include diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c index fadae4183..d4ca79a86 100644 --- a/drivers/clk/mxs/clk-pll.c +++ b/drivers/clk/mxs/clk-pll.c @@ -9,7 +9,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c index 4adeed6c2..495f99b79 100644 --- a/drivers/clk/mxs/clk-ref.c +++ b/drivers/clk/mxs/clk-ref.c @@ -9,7 +9,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include #include #include #include diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h index f07d821dd..a4590956d 100644 --- a/drivers/clk/mxs/clk.h +++ b/drivers/clk/mxs/clk.h @@ -12,7 +12,8 @@ #ifndef __MXS_CLK_H #define __MXS_CLK_H -#include +struct clk; + #include #include diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 81e9e1c78..e0a3cb897 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -8,7 +8,6 @@ * warranty of any kind, whether express or implied. 
*/ -#include #include #include #include diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c index c9b459821..7e8daab90 100644 --- a/drivers/clk/pistachio/clk-pll.c +++ b/drivers/clk/pistachio/clk-pll.c @@ -200,7 +200,7 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate, struct pistachio_pll_rate_table *params; int enabled = pll_gf40lp_frac_is_enabled(hw); u64 val, vco, old_postdiv1, old_postdiv2; - const char *name = __clk_get_name(hw->clk); + const char *name = clk_hw_get_name(hw); if (rate < MIN_OUTPUT_FRAC || rate > MAX_OUTPUT_FRAC) return -EINVAL; @@ -357,7 +357,7 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate, struct pistachio_pll_rate_table *params; int enabled = pll_gf40lp_laint_is_enabled(hw); u32 val, vco, old_postdiv1, old_postdiv2; - const char *name = __clk_get_name(hw->clk); + const char *name = clk_hw_get_name(hw); if (rate < MIN_OUTPUT_LA || rate > MAX_OUTPUT_LA) return -EINVAL; diff --git a/drivers/clk/pistachio/clk.c b/drivers/clk/pistachio/clk.c index 85faa83e1..698cad4f5 100644 --- a/drivers/clk/pistachio/clk.c +++ b/drivers/clk/pistachio/clk.c @@ -6,6 +6,7 @@ * version 2, as published by the Free Software Foundation. */ +#include #include #include #include diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index 6b4d2bcb1..26f7af315 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -75,7 +75,7 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling, bool (check_halt)(const struct clk_branch *, bool)) { bool voted = br->halt_check & BRANCH_VOTED; - const char *name = __clk_get_name(br->clkr.hw.clk); + const char *name = clk_hw_get_name(&br->clkr.hw); /* Skip checking halt bit if the clock is in hardware gated mode */ if (clk_branch_in_hwcg_mode(br)) diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index 245d5063a..5b940d629 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -135,19 +135,19 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate) return NULL; } -static long -clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int +clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { struct clk_pll *pll = to_clk_pll(hw); const struct pll_freq_tbl *f; - f = find_freq(pll->freq_tbl, rate); + f = find_freq(pll->freq_tbl, req->rate); if (!f) - return clk_pll_recalc_rate(hw, *p_rate); + req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate); + else + req->rate = f->freq; - return f->freq; + return 0; } static int @@ -194,7 +194,7 @@ static int wait_for_pll(struct clk_pll *pll) u32 val; int count; int ret; - const char *name = __clk_get_name(pll->clkr.hw.clk); + const char *name = clk_hw_get_name(&pll->clkr.hw); /* Wait for pll to enable. 
*/ for (count = 200; count > 0; count--) { @@ -213,7 +213,7 @@ static int wait_for_pll(struct clk_pll *pll) static int clk_pll_vote_enable(struct clk_hw *hw) { int ret; - struct clk_pll *p = to_clk_pll(__clk_get_hw(__clk_get_parent(hw->clk))); + struct clk_pll *p = to_clk_pll(clk_hw_get_parent(hw)); ret = clk_enable_regmap(hw); if (ret) @@ -292,3 +292,78 @@ void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap, clk_pll_set_fsm_mode(pll, regmap, 0); } EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp); + +static int clk_pll_sr2_enable(struct clk_hw *hw) +{ + struct clk_pll *pll = to_clk_pll(hw); + int ret; + u32 mode; + + ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &mode); + if (ret) + return ret; + + /* Disable PLL bypass mode. */ + ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_BYPASSNL, + PLL_BYPASSNL); + if (ret) + return ret; + + /* + * H/W requires a 5us delay between disabling the bypass and + * de-asserting the reset. Delay 10us just to be safe. + */ + udelay(10); + + /* De-assert active-low PLL reset. */ + ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_RESET_N, + PLL_RESET_N); + if (ret) + return ret; + + ret = wait_for_pll(pll); + if (ret) + return ret; + + /* Enable PLL output. */ + return regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL, + PLL_OUTCTRL); +} + +static int +clk_pll_sr2_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) +{ + struct clk_pll *pll = to_clk_pll(hw); + const struct pll_freq_tbl *f; + bool enabled; + u32 mode; + u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N; + + f = find_freq(pll->freq_tbl, rate); + if (!f) + return -EINVAL; + + regmap_read(pll->clkr.regmap, pll->mode_reg, &mode); + enabled = (mode & enable_mask) == enable_mask; + + if (enabled) + clk_pll_disable(hw); + + regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l); + regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, f->m); + regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, f->n); + + if (enabled) + clk_pll_sr2_enable(hw); + + return 0; +} + +const struct clk_ops clk_pll_sr2_ops = { + .enable = clk_pll_sr2_enable, + .disable = clk_pll_disable, + .set_rate = clk_pll_sr2_set_rate, + .recalc_rate = clk_pll_recalc_rate, + .determine_rate = clk_pll_determine_rate, +}; +EXPORT_SYMBOL_GPL(clk_pll_sr2_ops); diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h index c9c0cda30..ffd0c63bd 100644 --- a/drivers/clk/qcom/clk-pll.h +++ b/drivers/clk/qcom/clk-pll.h @@ -62,6 +62,7 @@ struct clk_pll { extern const struct clk_ops clk_pll_ops; extern const struct clk_ops clk_pll_vote_ops; +extern const struct clk_ops clk_pll_sr2_ops; #define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr) diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c index 7b3d62674..bccedc4b5 100644 --- a/drivers/clk/qcom/clk-rcg.c +++ b/drivers/clk/qcom/clk-rcg.c @@ -45,7 +45,7 @@ static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns) static u8 clk_rcg_get_parent(struct clk_hw *hw) { struct clk_rcg *rcg = to_clk_rcg(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 ns; int i, ret; @@ -59,7 +59,7 @@ static u8 clk_rcg_get_parent(struct clk_hw *hw) err: pr_debug("%s: Clock %s has invalid parent, using default.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return 0; } @@ -72,7 +72,7 @@ static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank) static u8 
clk_dyn_rcg_get_parent(struct clk_hw *hw) { struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 ns, reg; int bank; int i, ret; @@ -95,7 +95,7 @@ static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw) err: pr_debug("%s: Clock %s has invalid parent, using default.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return 0; } @@ -404,14 +404,12 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return calc_rate(parent_rate, m, n, mode, pre_div); } -static long _freq_tbl_determine_rate(struct clk_hw *hw, - const struct freq_tbl *f, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p_hw, +static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, + struct clk_rate_request *req, const struct parent_map *parent_map) { - unsigned long clk_flags; - struct clk *p; + unsigned long clk_flags, rate = req->rate; + struct clk_hw *p; int index; f = qcom_find_freq(f, rate); @@ -422,8 +420,8 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, if (index < 0) return index; - clk_flags = __clk_get_flags(hw->clk); - p = clk_get_parent_by_index(hw->clk, index); + clk_flags = clk_hw_get_flags(hw); + p = clk_hw_get_parent_by_index(hw, index); if (clk_flags & CLK_SET_RATE_PARENT) { rate = rate * f->pre_div; if (f->n) { @@ -433,27 +431,26 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, rate = tmp; } } else { - rate = __clk_get_rate(p); + rate = clk_hw_get_rate(p); } - *p_hw = __clk_get_hw(p); - *p_rate = rate; + req->best_parent_hw = p; + req->best_parent_rate = rate; + req->rate = f->freq; - return f->freq; + return 0; } -static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int clk_rcg_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg *rcg = to_clk_rcg(hw); - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, - max_rate, p_rate, p, rcg->s.parent_map); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, + rcg->s.parent_map); } -static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int clk_dyn_rcg_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); u32 reg; @@ -464,24 +461,22 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, bank = reg_to_bank(rcg, reg); s = &rcg->s[bank]; - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, - max_rate, p_rate, p, s->parent_map); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map); } -static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p_hw) +static int clk_rcg_bypass_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg *rcg = to_clk_rcg(hw); const struct freq_tbl *f = rcg->freq_tbl; - struct clk *p; + struct clk_hw *p; int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src); - p = clk_get_parent_by_index(hw->clk, index); - *p_hw = __clk_get_hw(p); - *p_rate = __clk_round_rate(p, rate); + req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index); + 
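The change running through these qcom hunks (and the mmp ones above) is the clk framework's determine_rate rework: instead of taking five loose parameters and returning the chosen rate as a long, the callback now fills in a struct clk_rate_request and returns 0 or a negative errno. A minimal sketch of the new callback shape for a hypothetical pass-through clock, not code from this patch:

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent_by_index(hw, 0);

	if (!parent)
		return -EINVAL;

	/* Report the chosen parent, its rate, and the rate we can achieve. */
	req->best_parent_hw = parent;
	req->best_parent_rate = clk_hw_get_rate(parent);
	req->rate = req->best_parent_rate;

	return 0;
}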
req->best_parent_rate = clk_hw_round_rate(p, req->rate); + req->rate = req->best_parent_rate; - return *p_rate; + return 0; } static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 92936f091..9aec1761f 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -63,7 +63,7 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw) static u8 clk_rcg2_get_parent(struct clk_hw *hw) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 cfg; int i, ret; @@ -80,7 +80,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw) err: pr_debug("%s: Clock %s has invalid parent, using default.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return 0; } @@ -89,7 +89,7 @@ static int update_config(struct clk_rcg2 *rcg) int count, ret; u32 cmd; struct clk_hw *hw = &rcg->clkr.hw; - const char *name = __clk_get_name(hw->clk); + const char *name = clk_hw_get_name(hw); ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, CMD_UPDATE, CMD_UPDATE); @@ -176,12 +176,11 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return calc_rate(parent_rate, m, n, mode, hid_div); } -static long _freq_tbl_determine_rate(struct clk_hw *hw, - const struct freq_tbl *f, unsigned long rate, - unsigned long *p_rate, struct clk_hw **p_hw) +static int _freq_tbl_determine_rate(struct clk_hw *hw, + const struct freq_tbl *f, struct clk_rate_request *req) { - unsigned long clk_flags; - struct clk *p; + unsigned long clk_flags, rate = req->rate; + struct clk_hw *p; struct clk_rcg2 *rcg = to_clk_rcg2(hw); int index; @@ -193,8 +192,8 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, if (index < 0) return index; - clk_flags = __clk_get_flags(hw->clk); - p = clk_get_parent_by_index(hw->clk, index); + clk_flags = clk_hw_get_flags(hw); + p = clk_hw_get_parent_by_index(hw, index); if (clk_flags & CLK_SET_RATE_PARENT) { if (f->pre_div) { rate /= 2; @@ -208,21 +207,21 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, rate = tmp; } } else { - rate = __clk_get_rate(p); + rate = clk_hw_get_rate(p); } - *p_hw = __clk_get_hw(p); - *p_rate = rate; + req->best_parent_hw = p; + req->best_parent_rate = rate; + req->rate = f->freq; - return f->freq; + return 0; } -static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int clk_rcg2_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req); } static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) @@ -374,35 +373,33 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw, return clk_edp_pixel_set_rate(hw, rate, parent_rate); } -static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int clk_edp_pixel_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); const struct freq_tbl *f = rcg->freq_tbl; const struct frac_entry *frac; int delta = 100000; - s64 src_rate = *p_rate; s64 request; u32 mask = BIT(rcg->hid_width) - 1; u32 hid_div; 
int index = qcom_find_src_index(hw, rcg->parent_map, f->src); /* Force the correct parent */ - *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, index)); + req->best_parent_hw = clk_hw_get_parent_by_index(hw, index); + req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw); - if (src_rate == 810000000) + if (req->best_parent_rate == 810000000) frac = frac_table_810m; else frac = frac_table_675m; for (; frac->num; frac++) { - request = rate; + request = req->rate; request *= frac->den; request = div_s64(request, frac->num); - if ((src_rate < (request - delta)) || - (src_rate > (request + delta))) + if ((req->best_parent_rate < (request - delta)) || + (req->best_parent_rate > (request + delta))) continue; regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, @@ -410,8 +407,10 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, hid_div >>= CFG_SRC_DIV_SHIFT; hid_div &= mask; - return calc_rate(src_rate, frac->num, frac->den, !!frac->den, - hid_div); + req->rate = calc_rate(req->best_parent_rate, + frac->num, frac->den, + !!frac->den, hid_div); + return 0; } return -EINVAL; @@ -428,28 +427,28 @@ const struct clk_ops clk_edp_pixel_ops = { }; EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); -static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p_hw) +static int clk_byte_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); const struct freq_tbl *f = rcg->freq_tbl; int index = qcom_find_src_index(hw, rcg->parent_map, f->src); unsigned long parent_rate, div; u32 mask = BIT(rcg->hid_width) - 1; - struct clk *p; + struct clk_hw *p; - if (rate == 0) + if (req->rate == 0) return -EINVAL; - p = clk_get_parent_by_index(hw->clk, index); - *p_hw = __clk_get_hw(p); - *p_rate = parent_rate = __clk_round_rate(p, rate); + req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index); + req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate); - div = DIV_ROUND_UP((2 * parent_rate), rate) - 1; + div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1; div = min_t(u32, div, mask); - return calc_rate(parent_rate, 0, 0, 0, div); + req->rate = calc_rate(parent_rate, 0, 0, 0, div); + + return 0; } static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate, @@ -494,10 +493,8 @@ static const struct frac_entry frac_table_pixel[] = { { } }; -static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *p_rate, struct clk_hw **p) +static int clk_pixel_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); unsigned long request, src_rate; @@ -505,20 +502,20 @@ static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, const struct freq_tbl *f = rcg->freq_tbl; const struct frac_entry *frac = frac_table_pixel; int index = qcom_find_src_index(hw, rcg->parent_map, f->src); - struct clk *parent = clk_get_parent_by_index(hw->clk, index); - *p = __clk_get_hw(parent); + req->best_parent_hw = clk_hw_get_parent_by_index(hw, index); for (; frac->num; frac++) { - request = (rate * frac->den) / frac->num; + request = (req->rate * frac->den) / frac->num; - src_rate = __clk_round_rate(parent, request); + src_rate = clk_hw_round_rate(req->best_parent_hw, request); if ((src_rate < (request - delta)) || (src_rate > (request + delta))) continue; - *p_rate = src_rate; - return 
(src_rate * frac->num) / frac->den; + req->best_parent_rate = src_rate; + req->rate = (src_rate * frac->num) / frac->den; + return 0; } return -EINVAL; diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index f7101e330..2dedceefd 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -45,7 +46,7 @@ EXPORT_SYMBOL_GPL(qcom_find_freq); int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) { - int i, num_parents = __clk_get_num_parents(hw->clk); + int i, num_parents = clk_hw_get_num_parents(hw); for (i = 0; i < num_parents; i++) if (src == map[i].src) @@ -144,3 +145,5 @@ void qcom_cc_remove(struct platform_device *pdev) reset_controller_unregister(platform_get_drvdata(pdev)); } EXPORT_SYMBOL_GPL(qcom_cc_remove); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c index 457c54058..3563019b8 100644 --- a/drivers/clk/qcom/gcc-apq8084.c +++ b/drivers/clk/qcom/gcc-apq8084.c @@ -48,7 +48,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = { { P_GPLL0, 1 } }; -static const char *gcc_xo_gpll0[] = { +static const char * const gcc_xo_gpll0[] = { "xo", "gpll0_vote", }; @@ -59,7 +59,7 @@ static const struct parent_map gcc_xo_gpll0_gpll4_map[] = { { P_GPLL4, 5 } }; -static const char *gcc_xo_gpll0_gpll4[] = { +static const char * const gcc_xo_gpll0_gpll4[] = { "xo", "gpll0_vote", "gpll4_vote", @@ -70,7 +70,7 @@ static const struct parent_map gcc_xo_sata_asic0_map[] = { { P_SATA_ASIC0_CLK, 2 } }; -static const char *gcc_xo_sata_asic0[] = { +static const char * const gcc_xo_sata_asic0[] = { "xo", "sata_asic0_clk", }; @@ -80,7 +80,7 @@ static const struct parent_map gcc_xo_sata_rx_map[] = { { P_SATA_RX_CLK, 2} }; -static const char *gcc_xo_sata_rx[] = { +static const char * const gcc_xo_sata_rx[] = { "xo", "sata_rx_clk", }; @@ -90,7 +90,7 @@ static const struct parent_map gcc_xo_pcie_map[] = { { P_PCIE_0_1_PIPE_CLK, 2 } }; -static const char *gcc_xo_pcie[] = { +static const char * const gcc_xo_pcie[] = { "xo", "pcie_pipe", }; @@ -100,7 +100,7 @@ static const struct parent_map gcc_xo_pcie_sleep_map[] = { { P_SLEEP_CLK, 6 } }; -static const char *gcc_xo_pcie_sleep[] = { +static const char * const gcc_xo_pcie_sleep[] = { "xo", "sleep_clk_src", }; diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 563969942..40e480220 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -188,7 +188,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = { { P_PLL8, 3 } }; -static const char *gcc_pxo_pll8[] = { +static const char * const gcc_pxo_pll8[] = { "pxo", "pll8_vote", }; @@ -199,7 +199,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = { { P_CXO, 5 } }; -static const char *gcc_pxo_pll8_cxo[] = { +static const char * const gcc_pxo_pll8_cxo[] = { "pxo", "pll8_vote", "cxo", @@ -215,7 +215,7 @@ static const struct parent_map gcc_pxo_pll3_sata_map[] = { { P_PLL3, 6 } }; -static const char *gcc_pxo_pll3[] = { +static const char * const gcc_pxo_pll3[] = { "pxo", "pll3", }; @@ -226,7 +226,7 @@ static const struct parent_map gcc_pxo_pll8_pll0[] = { { P_PLL0, 2 } }; -static const char *gcc_pxo_pll8_pll0_map[] = { +static const char * const gcc_pxo_pll8_pll0_map[] = { "pxo", "pll8_vote", "pll0_vote", @@ -240,7 +240,7 @@ static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = { { P_PLL18, 1 } }; -static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = { +static const 
char * const gcc_pxo_pll8_pll14_pll18_pll0[] = { "pxo", "pll8_vote", "pll0_vote", diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c index fc6b12da5..b02826ed7 100644 --- a/drivers/clk/qcom/gcc-msm8660.c +++ b/drivers/clk/qcom/gcc-msm8660.c @@ -70,7 +70,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = { { P_PLL8, 3 } }; -static const char *gcc_pxo_pll8[] = { +static const char * const gcc_pxo_pll8[] = { "pxo", "pll8_vote", }; @@ -81,7 +81,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = { { P_CXO, 5 } }; -static const char *gcc_pxo_pll8_cxo[] = { +static const char * const gcc_pxo_pll8_cxo[] = { "pxo", "pll8_vote", "cxo", @@ -1917,7 +1917,7 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = { } }; -static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" }; +static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" }; static struct clk_branch usb_fs1_xcvr_fs_clk = { .halt_reg = 0x2fcc, @@ -1984,7 +1984,7 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = { } }; -static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" }; +static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" }; static struct clk_branch usb_fs2_xcvr_fs_clk = { .halt_reg = 0x2fcc, diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 5d75bffab..22a4e1e73 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -51,7 +51,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = { { P_GPLL0, 1 }, }; -static const char *gcc_xo_gpll0[] = { +static const char * const gcc_xo_gpll0[] = { "xo", "gpll0_vote", }; @@ -62,7 +62,7 @@ static const struct parent_map gcc_xo_gpll0_bimc_map[] = { { P_BIMC, 2 }, }; -static const char *gcc_xo_gpll0_bimc[] = { +static const char * const gcc_xo_gpll0_bimc[] = { "xo", "gpll0_vote", "bimc_pll_vote", @@ -75,7 +75,7 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = { { P_GPLL2_AUX, 2 }, }; -static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = { +static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = { "xo", "gpll0_vote", "gpll1_vote", @@ -88,7 +88,7 @@ static const struct parent_map gcc_xo_gpll0_gpll2_map[] = { { P_GPLL2, 2 }, }; -static const char *gcc_xo_gpll0_gpll2[] = { +static const char * const gcc_xo_gpll0_gpll2[] = { "xo", "gpll0_vote", "gpll2_vote", @@ -99,7 +99,7 @@ static const struct parent_map gcc_xo_gpll0a_map[] = { { P_GPLL0_AUX, 2 }, }; -static const char *gcc_xo_gpll0a[] = { +static const char * const gcc_xo_gpll0a[] = { "xo", "gpll0_vote", }; @@ -111,7 +111,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = { { P_SLEEP_CLK, 6 }, }; -static const char *gcc_xo_gpll0_gpll1a_sleep[] = { +static const char * const gcc_xo_gpll0_gpll1a_sleep[] = { "xo", "gpll0_vote", "gpll1_vote", @@ -124,7 +124,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = { { P_GPLL1_AUX, 2 }, }; -static const char *gcc_xo_gpll0_gpll1a[] = { +static const char * const gcc_xo_gpll0_gpll1a[] = { "xo", "gpll0_vote", "gpll1_vote", @@ -135,7 +135,7 @@ static const struct parent_map gcc_xo_dsibyte_map[] = { { P_DSI0_PHYPLL_BYTE, 2 }, }; -static const char *gcc_xo_dsibyte[] = { +static const char * const gcc_xo_dsibyte[] = { "xo", "dsi0pllbyte", }; @@ -146,7 +146,7 @@ static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = { { P_DSI0_PHYPLL_BYTE, 1 }, }; -static const char *gcc_xo_gpll0a_dsibyte[] = { +static const char * const gcc_xo_gpll0a_dsibyte[] = { "xo", "gpll0_vote", "dsi0pllbyte", @@ -158,7 +158,7 @@ 
static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = { { P_DSI0_PHYPLL_DSI, 2 }, }; -static const char *gcc_xo_gpll0_dsiphy[] = { +static const char * const gcc_xo_gpll0_dsiphy[] = { "xo", "gpll0_vote", "dsi0pll", @@ -170,7 +170,7 @@ static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = { { P_DSI0_PHYPLL_DSI, 1 }, }; -static const char *gcc_xo_gpll0a_dsiphy[] = { +static const char * const gcc_xo_gpll0a_dsiphy[] = { "xo", "gpll0_vote", "dsi0pll", @@ -183,7 +183,7 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = { { P_GPLL2, 2 }, }; -static const char *gcc_xo_gpll0a_gpll1_gpll2[] = { +static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = { "xo", "gpll0_vote", "gpll1_vote", diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index eb6a4f9fa..aa294b1ba 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -125,7 +125,7 @@ static const struct parent_map gcc_pxo_pll8_map[] = { { P_PLL8, 3 } }; -static const char *gcc_pxo_pll8[] = { +static const char * const gcc_pxo_pll8[] = { "pxo", "pll8_vote", }; @@ -136,7 +136,7 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = { { P_CXO, 5 } }; -static const char *gcc_pxo_pll8_cxo[] = { +static const char * const gcc_pxo_pll8_cxo[] = { "pxo", "pll8_vote", "cxo", @@ -148,7 +148,7 @@ static const struct parent_map gcc_pxo_pll8_pll3_map[] = { { P_PLL3, 6 } }; -static const char *gcc_pxo_pll8_pll3[] = { +static const char * const gcc_pxo_pll8_pll3[] = { "pxo", "pll8_vote", "pll3", @@ -2085,7 +2085,7 @@ static struct clk_rcg usb_hsic_xcvr_fs_src = { } }; -static const char *usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" }; +static const char * const usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" }; static struct clk_branch usb_hsic_xcvr_fs_clk = { .halt_reg = 0x2fc8, @@ -2181,7 +2181,7 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = { } }; -static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" }; +static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" }; static struct clk_branch usb_fs1_xcvr_fs_clk = { .halt_reg = 0x2fcc, @@ -2248,7 +2248,7 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = { } }; -static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" }; +static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" }; static struct clk_branch usb_fs2_xcvr_fs_clk = { .halt_reg = 0x2fcc, diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c index f06a082e3..2bcf87538 100644 --- a/drivers/clk/qcom/gcc-msm8974.c +++ b/drivers/clk/qcom/gcc-msm8974.c @@ -44,7 +44,7 @@ static const struct parent_map gcc_xo_gpll0_map[] = { { P_GPLL0, 1 } }; -static const char *gcc_xo_gpll0[] = { +static const char * const gcc_xo_gpll0[] = { "xo", "gpll0_vote", }; @@ -55,7 +55,7 @@ static const struct parent_map gcc_xo_gpll0_gpll4_map[] = { { P_GPLL4, 5 } }; -static const char *gcc_xo_gpll0_gpll4[] = { +static const char * const gcc_xo_gpll0_gpll4[] = { "xo", "gpll0_vote", "gpll4_vote", diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 47f0ac16d..93ad42b14 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -71,7 +71,7 @@ static const struct parent_map lcc_pxo_pll4_map[] = { { P_PLL4, 2 } }; -static const char *lcc_pxo_pll4[] = { +static const char * const lcc_pxo_pll4[] = { "pxo", "pll4_vote", }; @@ -146,7 +146,7 @@ static struct clk_rcg mi2s_osr_src = { }, }; -static const char *lcc_mi2s_parents[] = { +static const char * const 
lcc_mi2s_parents[] = { "mi2s_osr_src", }; @@ -340,7 +340,7 @@ static struct clk_rcg spdif_src = { }, }; -static const char *lcc_spdif_parents[] = { +static const char * const lcc_spdif_parents[] = { "spdif_src", }; diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index d0df9d5fc..ecb96c284 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -57,7 +57,7 @@ static const struct parent_map lcc_pxo_pll4_map[] = { { P_PLL4, 2 } }; -static const char *lcc_pxo_pll4[] = { +static const char * const lcc_pxo_pll4[] = { "pxo", "pll4_vote", }; @@ -127,7 +127,7 @@ static struct clk_rcg mi2s_osr_src = { }, }; -static const char *lcc_mi2s_parents[] = { +static const char * const lcc_mi2s_parents[] = { "mi2s_osr_src", }; @@ -233,7 +233,7 @@ static struct clk_rcg prefix##_osr_src = { \ }, \ }; \ \ -static const char *lcc_##prefix##_parents[] = { \ +static const char * const lcc_##prefix##_parents[] = { \ #prefix "_osr_src", \ }; \ \ @@ -445,7 +445,7 @@ static struct clk_rcg slimbus_src = { }, }; -static const char *lcc_slimbus_parents[] = { +static const char * const lcc_slimbus_parents[] = { "slimbus_src", }; diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index 1b17df2cb..f0ee6bde1 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -53,7 +53,7 @@ static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = { { P_GPLL0, 5 } }; -static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = { +static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = { "xo", "mmpll0_vote", "mmpll1_vote", @@ -69,7 +69,7 @@ static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = { { P_DSI1PLL, 3 } }; -static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = { +static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = { "xo", "mmpll0_vote", "hdmipll", @@ -86,7 +86,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = { { P_MMPLL2, 3 } }; -static const char *mmcc_xo_mmpll0_1_2_gpll0[] = { +static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = { "xo", "mmpll0_vote", "mmpll1_vote", @@ -102,7 +102,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = { { P_MMPLL3, 3 } }; -static const char *mmcc_xo_mmpll0_1_3_gpll0[] = { +static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = { "xo", "mmpll0_vote", "mmpll1_vote", @@ -119,7 +119,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = { { P_DSI1PLL, 2 } }; -static const char *mmcc_xo_dsi_hdmi_edp[] = { +static const char * const mmcc_xo_dsi_hdmi_edp[] = { "xo", "edp_link_clk", "hdmipll", @@ -137,7 +137,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = { { P_DSI1PLL, 2 } }; -static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = { +static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = { "xo", "edp_link_clk", "hdmipll", @@ -155,7 +155,7 @@ static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = { { P_DSI1PLL_BYTE, 2 } }; -static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = { +static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = { "xo", "edp_link_clk", "hdmipll", @@ -172,7 +172,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll0_map[] = { { P_MMPLL4, 3 } }; -static const char *mmcc_xo_mmpll0_1_4_gpll0[] = { +static const char * const mmcc_xo_mmpll0_1_4_gpll0[] = { "xo", "mmpll0", "mmpll1", @@ -189,7 +189,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_map[] = { { P_GPLL1, 4 } }; -static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = { +static const char * 
const mmcc_xo_mmpll0_1_4_gpll1_0[] = { "xo", "mmpll0", "mmpll1", @@ -208,7 +208,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = { { P_MMSLEEP, 6 } }; -static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = { +static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = { "xo", "mmpll0", "mmpll1", diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c index 9711bca9c..bad02aebf 100644 --- a/drivers/clk/qcom/mmcc-msm8960.c +++ b/drivers/clk/qcom/mmcc-msm8960.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -50,7 +51,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_map[] = { { P_PLL2, 1 } }; -static const char *mmcc_pxo_pll8_pll2[] = { +static const char * const mmcc_pxo_pll8_pll2[] = { "pxo", "pll8_vote", "pll2", @@ -63,7 +64,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = { { P_PLL3, 3 } }; -static const char *mmcc_pxo_pll8_pll2_pll15[] = { +static const char * const mmcc_pxo_pll8_pll2_pll15[] = { "pxo", "pll8_vote", "pll2", @@ -77,7 +78,7 @@ static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = { { P_PLL15, 3 } }; -static const char *mmcc_pxo_pll8_pll2_pll3[] = { +static const char * const mmcc_pxo_pll8_pll2_pll3[] = { "pxo", "pll8_vote", "pll2", @@ -508,8 +509,7 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index) int ret = 0; u32 val; struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw); - struct clk *clk = hw->clk; - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); /* * These clocks select three inputs via two muxes. One mux selects @@ -520,7 +520,8 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index) * needs to be on at what time. */ for (i = 0; i < num_parents; i++) { - ret = clk_prepare_enable(clk_get_parent_by_index(clk, i)); + struct clk_hw *p = clk_hw_get_parent_by_index(hw, i); + ret = clk_prepare_enable(p->clk); if (ret) goto err; } @@ -548,8 +549,10 @@ static int pix_rdi_set_parent(struct clk_hw *hw, u8 index) udelay(1); err: - for (i--; i >= 0; i--) - clk_disable_unprepare(clk_get_parent_by_index(clk, i)); + for (i--; i >= 0; i--) { + struct clk_hw *p = clk_hw_get_parent_by_index(hw, i); + clk_disable_unprepare(p->clk); + } return ret; } @@ -579,7 +582,7 @@ static const struct clk_ops clk_ops_pix_rdi = { .determine_rate = __clk_mux_determine_rate, }; -static const char *pix_rdi_parents[] = { +static const char * const pix_rdi_parents[] = { "csi0_clk", "csi1_clk", "csi2_clk", @@ -709,7 +712,7 @@ static struct clk_rcg csiphytimer_src = { }, }; -static const char *csixphy_timer_src[] = { "csiphytimer_src" }; +static const char * const csixphy_timer_src[] = { "csiphytimer_src" }; static struct clk_branch csiphy0_timer_clk = { .halt_reg = 0x01e8, @@ -1385,7 +1388,7 @@ static const struct parent_map mmcc_pxo_hdmi_map[] = { { P_HDMI_PLL, 3 } }; -static const char *mmcc_pxo_hdmi[] = { +static const char * const mmcc_pxo_hdmi[] = { "pxo", "hdmi_pll", }; @@ -1428,7 +1431,7 @@ static struct clk_rcg tv_src = { }, }; -static const char *tv_src_name[] = { "tv_src" }; +static const char * const tv_src_name[] = { "tv_src" }; static struct clk_branch tv_enc_clk = { .halt_reg = 0x01d4, diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index 07f4cc159..0987bf443 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -56,7 +56,7 @@ static const struct parent_map mmcc_xo_mmpll0_mmpll1_gpll0_map[] = { { P_GPLL0, 5 } }; -static const char 
*mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
	"xo",
	"mmpll0_vote",
	"mmpll1_vote",
@@ -72,7 +72,7 @@ static const struct parent_map mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
	{ P_DSI1PLL, 3 }
 };

-static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
	"xo",
	"mmpll0_vote",
	"hdmipll",
@@ -89,7 +89,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
	{ P_MMPLL2, 3 }
 };

-static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
	"xo",
	"mmpll0_vote",
	"mmpll1_vote",
@@ -105,7 +105,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
	{ P_MMPLL3, 3 }
 };

-static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
	"xo",
	"mmpll0_vote",
	"mmpll1_vote",
@@ -121,7 +121,7 @@ static const struct parent_map mmcc_xo_mmpll0_1_gpll1_0_map[] = {
	{ P_GPLL1, 4 }
 };

-static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
+static const char * const mmcc_xo_mmpll0_1_gpll1_0[] = {
	"xo",
	"mmpll0_vote",
	"mmpll1_vote",
@@ -138,7 +138,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_map[] = {
	{ P_DSI1PLL, 2 }
 };

-static const char *mmcc_xo_dsi_hdmi_edp[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp[] = {
	"xo",
	"edp_link_clk",
	"hdmipll",
@@ -156,7 +156,7 @@ static const struct parent_map mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
	{ P_DSI1PLL, 2 }
 };

-static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
	"xo",
	"edp_link_clk",
	"hdmipll",
@@ -174,7 +174,7 @@ static const struct parent_map mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
	{ P_DSI1PLL_BYTE, 2 }
 };

-static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
	"xo",
	"edp_link_clk",
	"hdmipll",
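Taken together, the qcom hunks above are one conversion: parent-name tables become fully const, and providers stop reaching their own parents through the consumer API on hw->clk (__clk_get_num_parents(), clk_get_parent_by_index()) in favour of the clk_hw accessors this kernel adds. A minimal sketch of the resulting idiom; the helper name is hypothetical and a real driver may additionally have to handle a NULL parent:

	static int example_enable_all_parents(struct clk_hw *hw)
	{
		int num_parents = clk_hw_get_num_parents(hw);
		int i, ret;

		for (i = 0; i < num_parents; i++) {
			/* walk parents on the provider side, no struct clk lookup */
			struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);

			ret = clk_prepare_enable(p->clk);
			if (ret)
				return ret;
		}

		return 0;
	}

pix_rdi_set_parent() in mmcc-msm8960.c above is the full version of this loop, including the clk_disable_unprepare() unwinding on error.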
+ */ + +#include +#include +#include +#include +#include +#include "clk.h" + +struct rockchip_inv_clock { + struct clk_hw hw; + void __iomem *reg; + int shift; + int flags; + spinlock_t *lock; +}; + +#define to_inv_clock(_hw) container_of(_hw, struct rockchip_inv_clock, hw) + +#define INVERTER_MASK 0x1 + +static int rockchip_inv_get_phase(struct clk_hw *hw) +{ + struct rockchip_inv_clock *inv_clock = to_inv_clock(hw); + u32 val; + + val = readl(inv_clock->reg) >> inv_clock->shift; + val &= INVERTER_MASK; + return val ? 180 : 0; +} + +static int rockchip_inv_set_phase(struct clk_hw *hw, int degrees) +{ + struct rockchip_inv_clock *inv_clock = to_inv_clock(hw); + u32 val; + + if (degrees % 180 == 0) { + val = !!degrees; + } else { + pr_err("%s: unsupported phase %d for %s\n", + __func__, degrees, clk_hw_get_name(hw)); + return -EINVAL; + } + + if (inv_clock->flags & ROCKCHIP_INVERTER_HIWORD_MASK) { + writel(HIWORD_UPDATE(val, INVERTER_MASK, inv_clock->shift), + inv_clock->reg); + } else { + unsigned long flags; + u32 reg; + + spin_lock_irqsave(inv_clock->lock, flags); + + reg = readl(inv_clock->reg); + reg &= ~BIT(inv_clock->shift); + reg |= val; + writel(reg, inv_clock->reg); + + spin_unlock_irqrestore(inv_clock->lock, flags); + } + + return 0; +} + +static const struct clk_ops rockchip_inv_clk_ops = { + .get_phase = rockchip_inv_get_phase, + .set_phase = rockchip_inv_set_phase, +}; + +struct clk *rockchip_clk_register_inverter(const char *name, + const char *const *parent_names, u8 num_parents, + void __iomem *reg, int shift, int flags, + spinlock_t *lock) +{ + struct clk_init_data init; + struct rockchip_inv_clock *inv_clock; + struct clk *clk; + + inv_clock = kmalloc(sizeof(*inv_clock), GFP_KERNEL); + if (!inv_clock) + return NULL; + + init.name = name; + init.num_parents = num_parents; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = parent_names; + init.ops = &rockchip_inv_clk_ops; + + inv_clock->hw.init = &init; + inv_clock->reg = reg; + inv_clock->shift = shift; + inv_clock->flags = flags; + inv_clock->lock = lock; + + clk = clk_register(NULL, &inv_clock->hw); + if (IS_ERR(clk)) + goto err_free; + + return clk; + +err_free: + kfree(inv_clock); + return NULL; +} diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index e9f8df324..9b613426e 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -14,7 +14,10 @@ */ #include +#include #include +#include +#include #include "clk.h" struct rockchip_mmc_clock { @@ -105,7 +108,7 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees) writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg); pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n", - __clk_get_name(hw->clk), degrees, delay_num, + clk_hw_get_name(hw), degrees, delay_num, mmc_clock->reg, raw_value>>(mmc_clock->shift), rockchip_mmc_get_phase(hw) ); @@ -131,6 +134,7 @@ struct clk *rockchip_clk_register_mmc(const char *name, if (!mmc_clock) return NULL; + init.name = name; init.num_parents = num_parents; init.parent_names = parent_names; init.ops = &rockchip_mmc_clk_ops; @@ -139,9 +143,6 @@ struct clk *rockchip_clk_register_mmc(const char *name, mmc_clock->reg = reg; mmc_clock->shift = shift; - if (name) - init.name = name; - clk = clk_register(NULL, &mmc_clock->hw); if (IS_ERR(clk)) goto err_free; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index 76027261f..7737a1df1 100644 --- a/drivers/clk/rockchip/clk-pll.c 
+++ b/drivers/clk/rockchip/clk-pll.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include "clk.h" @@ -121,8 +120,8 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll) #define RK3066_PLLCON0_NR_SHIFT 8 #define RK3066_PLLCON1_NF_MASK 0x1fff #define RK3066_PLLCON1_NF_SHIFT 0 -#define RK3066_PLLCON2_BWADJ_MASK 0xfff -#define RK3066_PLLCON2_BWADJ_SHIFT 0 +#define RK3066_PLLCON2_NB_MASK 0xfff +#define RK3066_PLLCON2_NB_SHIFT 0 #define RK3066_PLLCON3_RESET (1 << 5) #define RK3066_PLLCON3_PWRDOWN (1 << 1) #define RK3066_PLLCON3_BYPASS (1 << 0) @@ -137,7 +136,7 @@ static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw, pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3)); if (pllcon & RK3066_PLLCON3_BYPASS) { pr_debug("%s: pll %s is bypassed\n", __func__, - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); return prate; } @@ -175,13 +174,13 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate, } pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n", - __func__, __clk_get_name(hw->clk), old_rate, drate, prate); + __func__, clk_hw_get_name(hw), old_rate, drate, prate); /* Get required rate settings from table */ rate = rockchip_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -208,8 +207,8 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate, writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK, RK3066_PLLCON1_NF_SHIFT), pll->reg_base + RK3066_PLLCON(1)); - writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK, - RK3066_PLLCON2_BWADJ_SHIFT), + writel_relaxed(HIWORD_UPDATE(rate->nb - 1, RK3066_PLLCON2_NB_MASK, + RK3066_PLLCON2_NB_SHIFT), pll->reg_base + RK3066_PLLCON(2)); /* leave reset and wait the reset_delay */ @@ -262,14 +261,14 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw) { struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); const struct rockchip_pll_rate_table *rate; - unsigned int nf, nr, no, bwadj; + unsigned int nf, nr, no, nb; unsigned long drate; u32 pllcon; if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE)) return; - drate = __clk_get_rate(hw->clk); + drate = clk_hw_get_rate(hw); rate = rockchip_get_pll_settings(pll, drate); /* when no rate setting for the current rate, rely on clk_set_rate */ @@ -284,25 +283,25 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw) nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1; pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2)); - bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK; + nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT) & RK3066_PLLCON2_NB_MASK) + 1; - pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n", - __func__, __clk_get_name(hw->clk), drate, rate->nr, nr, - rate->no, no, rate->nf, nf, rate->bwadj, bwadj); + pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n", + __func__, clk_hw_get_name(hw), drate, rate->nr, nr, + rate->no, no, rate->nf, nf, rate->nb, nb); if (rate->nr != nr || rate->no != no || rate->nf != nf - || rate->bwadj != bwadj) { - struct clk *parent = __clk_get_parent(hw->clk); + || rate->nb != nb) { + struct clk_hw *parent = clk_hw_get_parent(hw); unsigned long prate; if (!parent) { pr_warn("%s: parent of %s not available\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return; } 
 		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
-			 __func__, __clk_get_name(hw->clk));
-		prate = __clk_get_rate(parent);
+			 __func__, clk_hw_get_name(hw));
+		prate = clk_hw_get_rate(parent);
 		rockchip_rk3066_pll_set_rate(hw, drate, prate);
 	}
 }
@@ -354,6 +353,35 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 	if (!pll)
 		return ERR_PTR(-ENOMEM);
+	/* create the mux on top of the real pll */
+	pll->pll_mux_ops = &clk_mux_ops;
+	pll_mux = &pll->pll_mux;
+	pll_mux->reg = base + mode_offset;
+	pll_mux->shift = mode_shift;
+	pll_mux->mask = PLL_MODE_MASK;
+	pll_mux->flags = 0;
+	pll_mux->lock = lock;
+	pll_mux->hw.init = &init;
+
+	if (pll_type == pll_rk3066)
+		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+	/* the actual muxing is xin24m, pll-output, xin32k */
+	pll_parents[0] = parent_names[0];
+	pll_parents[1] = pll_name;
+	pll_parents[2] = parent_names[1];
+
+	init.name = name;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.ops = pll->pll_mux_ops;
+	init.parent_names = pll_parents;
+	init.num_parents = ARRAY_SIZE(pll_parents);
+
+	mux_clk = clk_register(NULL, &pll_mux->hw);
+	if (IS_ERR(mux_clk))
+		goto err_mux;
+
+	/* now create the actual pll */
 	init.name = pll_name;
 	/* keep all plls untouched for now */
@@ -399,47 +427,19 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
 	pll->flags = clk_pll_flags;
 	pll->lock = lock;
-	/* create the mux on top of the real pll */
-	pll->pll_mux_ops = &clk_mux_ops;
-	pll_mux = &pll->pll_mux;
-	pll_mux->reg = base + mode_offset;
-	pll_mux->shift = mode_shift;
-	pll_mux->mask = PLL_MODE_MASK;
-	pll_mux->flags = 0;
-	pll_mux->lock = lock;
-	pll_mux->hw.init = &init;
-
-	if (pll_type == pll_rk3066)
-		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
 	pll_clk = clk_register(NULL, &pll->hw);
 	if (IS_ERR(pll_clk)) {
 		pr_err("%s: failed to register pll clock %s : %ld\n",
 			__func__, name, PTR_ERR(pll_clk));
-		mux_clk = pll_clk;
 		goto err_pll;
 	}
-	/* the actual muxing is xin24m, pll-output, xin32k */
-	pll_parents[0] = parent_names[0];
-	pll_parents[1] = pll_name;
-	pll_parents[2] = parent_names[1];
-
-	init.name = name;
-	init.flags = CLK_SET_RATE_PARENT;
-	init.ops = pll->pll_mux_ops;
-	init.parent_names = pll_parents;
-	init.num_parents = ARRAY_SIZE(pll_parents);
-
-	mux_clk = clk_register(NULL, &pll_mux->hw);
-	if (IS_ERR(mux_clk))
-		goto err_mux;
-
 	return mux_clk;
-err_mux:
-	clk_unregister(pll_clk);
 err_pll:
+	clk_unregister(mux_clk);
+	mux_clk = pll_clk;
+err_mux:
 	kfree(pll);
 	return mux_clk;
 }
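The bwadj to nb rename in clk-pll.c above also changes the register encoding: PLLCON2 now stores nb - 1, the read-back side adds the 1 again, and the rk3188 fixup below writes rate->nb = 1 where it previously wrote rate->bwadj = 0. The output rate itself depends only on nr, nf and no; a simplified form of what rockchip_rk3066_pll_recalc_rate() computes (ignoring the 64-bit widening the kernel version uses to avoid overflow):

	/* Fout = Fref * nf / (nr * no); nb only tunes PLL bandwidth */
	static unsigned long rk3066_pll_rate(unsigned long fref, unsigned int nr,
					     unsigned int nf, unsigned int no)
	{
		return fref / nr * nf / no;
	}

For the RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1) entry in clk-rk3288.c below: 24000000 / 1 * 198 / 8 = 594000000.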
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index e4f9d472f..abb476087 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -13,6 +13,7 @@
 * GNU General Public License for more details.
 */
+#include
 #include
 #include
 #include
@@ -201,7 +202,7 @@ PNAME(mux_pll_src_cpll_gpll_p)	= { "cpll", "gpll" };
 PNAME(mux_aclk_cpu_p)		= { "apll", "gpll" };
 PNAME(mux_sclk_cif0_p)		= { "cif0_pre", "xin24m" };
 PNAME(mux_sclk_i2s0_p)		= { "i2s0_pre", "i2s0_frac", "xin12m" };
-PNAME(mux_sclk_spdif_p)		= { "spdif_src", "spdif_frac", "xin12m" };
+PNAME(mux_sclk_spdif_p)		= { "spdif_pre", "spdif_frac", "xin12m" };
 PNAME(mux_sclk_uart0_p)		= { "uart0_pre", "uart0_frac", "xin24m" };
 PNAME(mux_sclk_uart1_p)		= { "uart1_pre", "uart1_frac", "xin24m" };
 PNAME(mux_sclk_uart2_p)		= { "uart2_pre", "uart2_frac", "xin24m" };
@@ -235,6 +236,7 @@ static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
 #define MFLAGS CLK_MUX_HIWORD_MASK
 #define DFLAGS CLK_DIVIDER_HIWORD_MASK
 #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
 /* 2 ^ (val + 1) */
 static struct clk_div_table div_core_peri_t[] = {
@@ -310,6 +312,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	GATE(0, "pclkin_cif0", "ext_cif0", 0,
 			RK2928_CLKGATE_CON(3), 3, GFLAGS),
+	INVERTER(0, "pclk_cif0", "pclkin_cif0",
+			RK2928_CLKSEL_CON(30), 8, IFLAGS),
 	/*
 	 * the 480m are generated inside the usb block from these clocks,
@@ -334,8 +338,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
 			RK2928_CLKSEL_CON(23), 0,
 			RK2928_CLKGATE_CON(2), 7, GFLAGS),
-	MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
+	MUX(0, "sclk_hsadc_out", mux_sclk_hsadc_p, 0,
 			RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
+	INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+			RK2928_CLKSEL_CON(22), 7, IFLAGS),
 	COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
 			RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
@@ -344,10 +350,10 @@
 	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
 			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
 			RK2928_CLKGATE_CON(0), 13, GFLAGS),
-	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(9), 0,
 			RK2928_CLKGATE_CON(0), 14, GFLAGS),
-	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 	/*
@@ -557,6 +563,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
 	GATE(0, "pclkin_cif1", "ext_cif1", 0,
 			RK2928_CLKGATE_CON(3), 4, GFLAGS),
+	INVERTER(0, "pclk_cif1", "pclkin_cif1",
+			RK2928_CLKSEL_CON(30), 12, IFLAGS),
 	COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
 			RK2928_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 5, DFLAGS,
@@ -708,6 +716,8 @@ static const char *const rk3188_critical_clocks[] __initconst = {
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_cpu",
+	"pclk_peri",
 };
 static void __init rk3188_common_clk_init(struct device_node *np)
@@ -736,8 +746,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
 	rockchip_clk_register_branches(common_clk_branches,
 				  ARRAY_SIZE(common_clk_branches));
-	rockchip_clk_protect_critical(rk3188_critical_clocks,
-				      ARRAY_SIZE(rk3188_critical_clocks));
 	rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
 				  ROCKCHIP_SOFTRST_HIWORD_MASK);
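The two hunks above, together with the rk3066a and rk3188a hunks below, move rockchip_clk_protect_critical() from the common init to the end of each SoC-specific init, after the cpuclk is registered, and extend the list with pclk_cpu and pclk_peri. The helper in drivers/clk/rockchip/clk.c is essentially the loop below (a paraphrased sketch, not the verbatim function):

	static void __init protect_critical(const char *const clocks[],
					    int nclocks)
	{
		int i;

		/* take a permanent enable reference so these never get gated */
		for (i = 0; i < nclocks; i++) {
			struct clk *clk = __clk_lookup(clocks[i]);

			if (clk)
				clk_prepare_enable(clk);
		}
	}

One reason the call has to come last: __clk_lookup() returns NULL for a name that is not registered yet, and such a clock would silently stay unprotected.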
@@ -757,6 +765,8 @@ static void __init rk3066a_clk_init(struct device_node *np)
 			mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
 			&rk3066_cpuclk_data, rk3066_cpuclk_rates,
 			ARRAY_SIZE(rk3066_cpuclk_rates));
+	rockchip_clk_protect_critical(rk3188_critical_clocks,
+				      ARRAY_SIZE(rk3188_critical_clocks));
 }
 CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
@@ -793,6 +803,9 @@ static void __init rk3188a_clk_init(struct device_node *np)
 		pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n",
 			__func__);
 	}
+
+	rockchip_clk_protect_critical(rk3188_critical_clocks,
+				      ARRAY_SIZE(rk3188_critical_clocks));
 }
 CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
@@ -809,7 +822,7 @@ static void __init rk3188_clk_init(struct device_node *np)
 		rate = pll->rate_table;
 		while (rate->rate > 0) {
-			rate->bwadj = 0;
+			rate->nb = 1;
 			rate++;
 		}
 	}
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0211162ee..9040878e3 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -84,7 +84,7 @@ static struct rockchip_pll_rate_table rk3288_pll_rates[] = {
 	RK3066_PLL_RATE( 742500000, 8, 495, 2),
 	RK3066_PLL_RATE( 696000000, 1, 58, 2),
 	RK3066_PLL_RATE( 600000000, 1, 50, 2),
-	RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
+	RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1),
 	RK3066_PLL_RATE( 552000000, 1, 46, 2),
 	RK3066_PLL_RATE( 504000000, 1, 84, 4),
 	RK3066_PLL_RATE( 500000000, 3, 125, 2),
@@ -189,7 +189,7 @@ PNAME(mux_uart1_p)	= { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p)	= { "uart2_src", "uart2_frac", "xin24m" };
 PNAME(mux_uart3_p)	= { "uart3_src", "uart3_frac", "xin24m" };
 PNAME(mux_uart4_p)	= { "uart4_src", "uart4_frac", "xin24m" };
-PNAME(mux_cif_out_p)	= { "cif_src", "xin24m" };
+PNAME(mux_vip_out_p)	= { "vip_src", "xin24m" };
 PNAME(mux_mac_p)	= { "mac_pll_src", "ext_gmac" };
 PNAME(mux_hsadcout_p)	= { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)	= { "ext_edp_24m", "xin24m" };
@@ -223,6 +223,7 @@ static struct clk_div_table div_hclk_cpu_t[] = {
 #define MFLAGS CLK_MUX_HIWORD_MASK
 #define DFLAGS CLK_DIVIDER_HIWORD_MASK
 #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
 static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	/*
@@ -434,7 +435,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(26), 8, 1, MFLAGS,
 			RK3288_CLKGATE_CON(3), 7, GFLAGS),
-	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_cif_out_p, 0,
+	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
 			RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS),
 	DIV(0, "pclk_pd_alive", "gpll", 0,
@@ -592,8 +593,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
 			RK3288_CLKGATE_CON(2), 6, GFLAGS),
-	MUX(SCLK_HSADC, "sclk_hsadc_out", mux_hsadcout_p, 0,
+	MUX(0, "sclk_hsadc_out", mux_hsadcout_p, 0,
 			RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+	INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+			RK3288_CLKSEL_CON(22), 7, IFLAGS),
 	GATE(0, "jtag", "ext_jtag", 0,
 			RK3288_CLKGATE_CON(4), 14, GFLAGS),
@@ -768,13 +771,16 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	 */
 	GATE(0, "pclk_vip_in", "ext_vip", 0,
 			RK3288_CLKGATE_CON(16), 0, GFLAGS),
+	INVERTER(0, "pclk_vip", "pclk_vip_in", RK3288_CLKSEL_CON(29), 4, IFLAGS),
 	GATE(0, "pclk_isp_in", "ext_isp", 0,
 			RK3288_CLKGATE_CON(16), 3, GFLAGS),
+	INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS),
 };
 static const char *const rk3288_critical_clocks[] __initconst = {
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_pd_pmu",
 };
 #ifdef
CONFIG_PM_SLEEP diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c new file mode 100644 index 000000000..7e6b783e6 --- /dev/null +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -0,0 +1,887 @@ +/* + * Copyright (c) 2015 Heiko Stuebner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "clk.h" + +#define RK3368_GRF_SOC_STATUS0 0x480 + +enum rk3368_plls { + apllb, aplll, dpll, cpll, gpll, npll, +}; + +static struct rockchip_pll_rate_table rk3368_pll_rates[] = { + RK3066_PLL_RATE(2208000000, 1, 92, 1), + RK3066_PLL_RATE(2184000000, 1, 91, 1), + RK3066_PLL_RATE(2160000000, 1, 90, 1), + RK3066_PLL_RATE(2136000000, 1, 89, 1), + RK3066_PLL_RATE(2112000000, 1, 88, 1), + RK3066_PLL_RATE(2088000000, 1, 87, 1), + RK3066_PLL_RATE(2064000000, 1, 86, 1), + RK3066_PLL_RATE(2040000000, 1, 85, 1), + RK3066_PLL_RATE(2016000000, 1, 84, 1), + RK3066_PLL_RATE(1992000000, 1, 83, 1), + RK3066_PLL_RATE(1968000000, 1, 82, 1), + RK3066_PLL_RATE(1944000000, 1, 81, 1), + RK3066_PLL_RATE(1920000000, 1, 80, 1), + RK3066_PLL_RATE(1896000000, 1, 79, 1), + RK3066_PLL_RATE(1872000000, 1, 78, 1), + RK3066_PLL_RATE(1848000000, 1, 77, 1), + RK3066_PLL_RATE(1824000000, 1, 76, 1), + RK3066_PLL_RATE(1800000000, 1, 75, 1), + RK3066_PLL_RATE(1776000000, 1, 74, 1), + RK3066_PLL_RATE(1752000000, 1, 73, 1), + RK3066_PLL_RATE(1728000000, 1, 72, 1), + RK3066_PLL_RATE(1704000000, 1, 71, 1), + RK3066_PLL_RATE(1680000000, 1, 70, 1), + RK3066_PLL_RATE(1656000000, 1, 69, 1), + RK3066_PLL_RATE(1632000000, 1, 68, 1), + RK3066_PLL_RATE(1608000000, 1, 67, 1), + RK3066_PLL_RATE(1560000000, 1, 65, 1), + RK3066_PLL_RATE(1512000000, 1, 63, 1), + RK3066_PLL_RATE(1488000000, 1, 62, 1), + RK3066_PLL_RATE(1464000000, 1, 61, 1), + RK3066_PLL_RATE(1440000000, 1, 60, 1), + RK3066_PLL_RATE(1416000000, 1, 59, 1), + RK3066_PLL_RATE(1392000000, 1, 58, 1), + RK3066_PLL_RATE(1368000000, 1, 57, 1), + RK3066_PLL_RATE(1344000000, 1, 56, 1), + RK3066_PLL_RATE(1320000000, 1, 55, 1), + RK3066_PLL_RATE(1296000000, 1, 54, 1), + RK3066_PLL_RATE(1272000000, 1, 53, 1), + RK3066_PLL_RATE(1248000000, 1, 52, 1), + RK3066_PLL_RATE(1224000000, 1, 51, 1), + RK3066_PLL_RATE(1200000000, 1, 50, 1), + RK3066_PLL_RATE(1176000000, 1, 49, 1), + RK3066_PLL_RATE(1128000000, 1, 47, 1), + RK3066_PLL_RATE(1104000000, 1, 46, 1), + RK3066_PLL_RATE(1008000000, 1, 84, 2), + RK3066_PLL_RATE( 912000000, 1, 76, 2), + RK3066_PLL_RATE( 888000000, 1, 74, 2), + RK3066_PLL_RATE( 816000000, 1, 68, 2), + RK3066_PLL_RATE( 792000000, 1, 66, 2), + RK3066_PLL_RATE( 696000000, 1, 58, 2), + RK3066_PLL_RATE( 672000000, 1, 56, 2), + RK3066_PLL_RATE( 648000000, 1, 54, 2), + RK3066_PLL_RATE( 624000000, 1, 52, 2), + RK3066_PLL_RATE( 600000000, 1, 50, 2), + RK3066_PLL_RATE( 576000000, 1, 48, 2), + RK3066_PLL_RATE( 552000000, 1, 46, 2), + RK3066_PLL_RATE( 528000000, 1, 88, 4), + RK3066_PLL_RATE( 504000000, 1, 84, 4), + RK3066_PLL_RATE( 480000000, 1, 80, 4), + RK3066_PLL_RATE( 456000000, 1, 76, 4), + RK3066_PLL_RATE( 408000000, 1, 68, 4), + RK3066_PLL_RATE( 312000000, 1, 
52, 4), + RK3066_PLL_RATE( 252000000, 1, 84, 8), + RK3066_PLL_RATE( 216000000, 1, 72, 8), + RK3066_PLL_RATE( 126000000, 2, 84, 8), + RK3066_PLL_RATE( 48000000, 2, 32, 8), + { /* sentinel */ }, +}; + +PNAME(mux_pll_p) = { "xin24m", "xin32k" }; +PNAME(mux_armclkb_p) = { "apllb_core", "gpllb_core" }; +PNAME(mux_armclkl_p) = { "aplll_core", "gplll_core" }; +PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" }; +PNAME(mux_cs_src_p) = { "apllb_cs", "aplll_cs", "gpll_cs"}; +PNAME(mux_aclk_bus_src_p) = { "cpll_aclk_bus", "gpll_aclk_bus" }; + +PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" }; +PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" }; +PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" }; +PNAME(mux_pll_src_cpll_gpll_usb_p) = { "cpll", "gpll", "usbphy_480m" }; +PNAME(mux_pll_src_cpll_gpll_usb_usb_p) = { "cpll", "gpll", "usbphy_480m", + "usbphy_480m" }; +PNAME(mux_pll_src_cpll_gpll_usb_npll_p) = { "cpll", "gpll", "usbphy_480m", + "npll" }; +PNAME(mux_pll_src_cpll_gpll_npll_npll_p) = { "cpll", "gpll", "npll", "npll" }; +PNAME(mux_pll_src_cpll_gpll_npll_usb_p) = { "cpll", "gpll", "npll", + "usbphy_480m" }; + +PNAME(mux_i2s_8ch_pre_p) = { "i2s_8ch_src", "i2s_8ch_frac", + "ext_i2s", "xin12m" }; +PNAME(mux_i2s_8ch_clkout_p) = { "i2s_8ch_pre", "xin12m" }; +PNAME(mux_i2s_2ch_p) = { "i2s_2ch_src", "i2s_2ch_frac", + "dummy", "xin12m" }; +PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", + "ext_i2s", "xin12m" }; +PNAME(mux_edp_24m_p) = { "dummy", "xin24m" }; +PNAME(mux_vip_out_p) = { "vip_src", "xin24m" }; +PNAME(mux_usbphy480m_p) = { "usbotg_out", "xin24m" }; +PNAME(mux_hsic_usbphy480m_p) = { "usbotg_out", "dummy" }; +PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy_480m" }; +PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; +PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; +PNAME(mux_uart2_p) = { "uart2_src", "xin24m" }; +PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" }; +PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" }; +PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; +PNAME(mux_mmc_src_p) = { "cpll", "gpll", "usbphy_480m", "xin24m" }; + +static struct rockchip_pll_clock rk3368_pll_clks[] __initdata = { + [apllb] = PLL(pll_rk3066, PLL_APLLB, "apllb", mux_pll_p, 0, RK3368_PLL_CON(0), + RK3368_PLL_CON(3), 8, 1, 0, rk3368_pll_rates), + [aplll] = PLL(pll_rk3066, PLL_APLLL, "aplll", mux_pll_p, 0, RK3368_PLL_CON(4), + RK3368_PLL_CON(7), 8, 0, 0, rk3368_pll_rates), + [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3368_PLL_CON(8), + RK3368_PLL_CON(11), 8, 2, 0, NULL), + [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3368_PLL_CON(12), + RK3368_PLL_CON(15), 8, 3, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates), + [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3368_PLL_CON(16), + RK3368_PLL_CON(19), 8, 4, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates), + [npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3368_PLL_CON(20), + RK3368_PLL_CON(23), 8, 5, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates), +}; + +static struct clk_div_table div_ddrphy_t[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 3, .div = 4 }, + { /* sentinel */ }, +}; + +#define MFLAGS CLK_MUX_HIWORD_MASK +#define DFLAGS CLK_DIVIDER_HIWORD_MASK +#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) +#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK + +static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = { + .core_reg = RK3368_CLKSEL_CON(0), + .div_core_shift = 0, + .div_core_mask = 0x1f, 
+ .mux_core_shift = 15, +}; + +static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = { + .core_reg = RK3368_CLKSEL_CON(2), + .div_core_shift = 0, + .div_core_mask = 0x1f, + .mux_core_shift = 7, +}; + +#define RK3368_DIV_ACLKM_MASK 0x1f +#define RK3368_DIV_ACLKM_SHIFT 8 +#define RK3368_DIV_ATCLK_MASK 0x1f +#define RK3368_DIV_ATCLK_SHIFT 0 +#define RK3368_DIV_PCLK_DBG_MASK 0x1f +#define RK3368_DIV_PCLK_DBG_SHIFT 8 + +#define RK3368_CLKSEL0(_offs, _aclkm) \ + { \ + .reg = RK3288_CLKSEL_CON(0 + _offs), \ + .val = HIWORD_UPDATE(_aclkm, RK3368_DIV_ACLKM_MASK, \ + RK3368_DIV_ACLKM_SHIFT), \ + } +#define RK3368_CLKSEL1(_offs, _atclk, _pdbg) \ + { \ + .reg = RK3288_CLKSEL_CON(1 + _offs), \ + .val = HIWORD_UPDATE(_atclk, RK3368_DIV_ATCLK_MASK, \ + RK3368_DIV_ATCLK_SHIFT) | \ + HIWORD_UPDATE(_pdbg, RK3368_DIV_PCLK_DBG_MASK, \ + RK3368_DIV_PCLK_DBG_SHIFT), \ + } + +/* cluster_b: aclkm in clksel0, rest in clksel1 */ +#define RK3368_CPUCLKB_RATE(_prate, _aclkm, _atclk, _pdbg) \ + { \ + .prate = _prate, \ + .divs = { \ + RK3368_CLKSEL0(0, _aclkm), \ + RK3368_CLKSEL1(0, _atclk, _pdbg), \ + }, \ + } + +/* cluster_l: aclkm in clksel2, rest in clksel3 */ +#define RK3368_CPUCLKL_RATE(_prate, _aclkm, _atclk, _pdbg) \ + { \ + .prate = _prate, \ + .divs = { \ + RK3368_CLKSEL0(2, _aclkm), \ + RK3368_CLKSEL1(2, _atclk, _pdbg), \ + }, \ + } + +static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = { + RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6), + RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5), + RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5), + RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4), + RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4), + RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3), + RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3), + RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2), + RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2), + RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2), +}; + +static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = { + RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7), + RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6), + RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6), + RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5), + RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5), + RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4), + RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3), + RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3), + RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2), + RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2), +}; + +static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { + /* + * Clock-Architecture Diagram 2 + */ + + MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(13), 8, 1, MFLAGS), + + GATE(0, "apllb_core", "apllb", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 0, GFLAGS), + GATE(0, "gpllb_core", "gpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 1, GFLAGS), + + GATE(0, "aplll_core", "aplll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 4, GFLAGS), + GATE(0, "gplll_core", "gpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 5, GFLAGS), + + DIV(0, "aclkm_core_b", "armclkb", 0, + RK3368_CLKSEL_CON(0), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + DIV(0, "atclk_core_b", "armclkb", 0, + RK3368_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + DIV(0, "pclk_dbg_b", "armclkb", 0, + RK3368_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + + DIV(0, "aclkm_core_l", "armclkl", 0, + RK3368_CLKSEL_CON(2), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + DIV(0, "atclk_core_l", "armclkl", 0, + RK3368_CLKSEL_CON(3), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + DIV(0, "pclk_dbg_l", "armclkl", 0, + 
RK3368_CLKSEL_CON(3), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY), + + GATE(0, "apllb_cs", "apllb", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 9, GFLAGS), + GATE(0, "aplll_cs", "aplll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 10, GFLAGS), + GATE(0, "gpll_cs", "gpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(0), 8, GFLAGS), + COMPOSITE_NOGATE(0, "sclk_cs_pre", mux_cs_src_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS), + COMPOSITE_NOMUX(0, "clkin_trace", "sclk_cs_pre", CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(4), 8, 5, DFLAGS, + RK3368_CLKGATE_CON(0), 13, GFLAGS), + + COMPOSITE(0, "aclk_cci_pre", mux_pll_src_cpll_gpll_usb_npll_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(0), 12, GFLAGS), + GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3368_CLKGATE_CON(7), 10, GFLAGS), + + GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(1), 8, GFLAGS), + GATE(0, "gpll_ddr", "gpll", 0, + RK3368_CLKGATE_CON(1), 9, GFLAGS), + COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t), + + GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(6), 14, GFLAGS), + GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(6), 15, GFLAGS), + + GATE(0, "gpll_aclk_bus", "gpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(1), 10, GFLAGS), + GATE(0, "cpll_aclk_bus", "cpll", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(1), 11, GFLAGS), + COMPOSITE_NOGATE(0, "aclk_bus_src", mux_aclk_bus_src_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(8), 7, 1, MFLAGS, 0, 5, DFLAGS), + + GATE(ACLK_BUS, "aclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(1), 0, GFLAGS), + COMPOSITE_NOMUX(PCLK_BUS, "pclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(8), 12, 3, DFLAGS, + RK3368_CLKGATE_CON(1), 2, GFLAGS), + COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(8), 8, 2, DFLAGS, + RK3368_CLKGATE_CON(1), 1, GFLAGS), + COMPOSITE_NOMUX(0, "sclk_crypto", "aclk_bus_src", 0, + RK3368_CLKSEL_CON(10), 14, 2, DFLAGS, + RK3368_CLKGATE_CON(7), 2, GFLAGS), + + COMPOSITE(0, "fclk_mcu_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(1), 3, GFLAGS), + /* + * stclk_mcu is listed as child of fclk_mcu_src in diagram 5, + * but stclk_mcu has an additional own divider in diagram 2 + */ + COMPOSITE_NOMUX(0, "stclk_mcu", "fclk_mcu_src", 0, + RK3368_CLKSEL_CON(12), 8, 3, DFLAGS, + RK3368_CLKGATE_CON(13), 13, GFLAGS), + + COMPOSITE(0, "i2s_8ch_src", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(27), 12, 1, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(6), 1, GFLAGS), + COMPOSITE_FRAC(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(28), 0, + RK3368_CLKGATE_CON(6), 2, GFLAGS), + MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(27), 8, 2, MFLAGS), + COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "i2s_8ch_clkout", mux_i2s_8ch_clkout_p, 0, + RK3368_CLKSEL_CON(27), 15, 1, MFLAGS, + RK3368_CLKGATE_CON(6), 0, GFLAGS), + GATE(SCLK_I2S_8CH, "sclk_i2s_8ch", "i2s_8ch_pre", CLK_SET_RATE_PARENT, + RK3368_CLKGATE_CON(6), 3, GFLAGS), + COMPOSITE(0, "spdif_8ch_src", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(31), 12, 1, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(6), 4, GFLAGS), + COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(32), 0, + 
			RK3368_CLKGATE_CON(6), 5, GFLAGS),
+	COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+			RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
+			RK3368_CLKGATE_CON(6), 6, GFLAGS),
+	COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(53), 12, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(5), 13, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(54), 0,
+			RK3368_CLKGATE_CON(5), 14, GFLAGS),
+	COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
+			RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
+			RK3368_CLKGATE_CON(5), 15, GFLAGS),
+
+	COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3368_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(6), 12, GFLAGS),
+	GATE(0, "sclk_hsadc_tsp", "ext_hsadc_tsp", 0,
+			RK3368_CLKGATE_CON(13), 7, GFLAGS),
+
+	MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(35), 12, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
+			RK3368_CLKSEL_CON(37), 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 4, GFLAGS),
+	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(37), 8, 1, MFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 3
+	 */
+
+	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 6, GFLAGS),
+	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 7, GFLAGS),
+
+	/*
+	 * We introduce a virtual node of hclk_video_pre_v to split one clock
+	 * struct with a gate and a fixed divider into two nodes in software.
+	 */
+	GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+		RK3368_CLKGATE_CON(4), 8, GFLAGS),
+
+	COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+			RK3368_CLKSEL_CON(17), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 1, GFLAGS),
+	COMPOSITE(0, "sclk_hevc_core_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+			RK3368_CLKSEL_CON(17), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 2, GFLAGS),
+
+	COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(19), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 0, GFLAGS),
+	DIV(0, "hclk_vio", "aclk_vio0", 0,
+			RK3368_CLKSEL_CON(21), 0, 5, DFLAGS),
+
+	COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(18), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 3, GFLAGS),
+	COMPOSITE(SCLK_RGA, "sclk_rga", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(18), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 4, GFLAGS),
+
+	COMPOSITE(DCLK_VOP, "dclk_vop", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3368_CLKSEL_CON(20), 8, 2, MFLAGS, 0, 8, DFLAGS,
+			RK3368_CLKGATE_CON(4), 1, GFLAGS),
+
+	GATE(SCLK_VOP0_PWM, "sclk_vop0_pwm", "xin24m", 0,
+			RK3368_CLKGATE_CON(4), 2, GFLAGS),
+
+	COMPOSITE(SCLK_ISP, "sclk_isp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+			RK3368_CLKSEL_CON(22), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3368_CLKGATE_CON(4), 9, GFLAGS),
+
+	GATE(0, "pclk_isp_in", "ext_isp", 0,
+			RK3368_CLKGATE_CON(17), 2, GFLAGS),
+	INVERTER(PCLK_ISP, "pclk_isp", "pclk_isp_in",
+			RK3368_CLKSEL_CON(21), 6, IFLAGS),
+
+	GATE(0, "pclk_vip_in", "ext_vip", 0,
+			RK3368_CLKGATE_CON(16), 13, GFLAGS),
+	INVERTER(PCLK_VIP, "pclk_vip", "pclk_vip_in",
+			RK3368_CLKSEL_CON(21), 13, IFLAGS),
+
+	GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
+			RK3368_CLKGATE_CON(4), 13, GFLAGS),
+	GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0, +
RK3368_CLKGATE_CON(5), 12, GFLAGS), + + COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(21), 15, 1, MFLAGS, + RK3368_CLKGATE_CON(4), 5, GFLAGS), + COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0, + RK3368_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS), + + COMPOSITE_NODIV(SCLK_EDP_24M, "sclk_edp_24m", mux_edp_24m_p, 0, + RK3368_CLKSEL_CON(23), 8, 1, MFLAGS, + RK3368_CLKGATE_CON(5), 4, GFLAGS), + COMPOSITE(SCLK_EDP, "sclk_edp", mux_pll_src_cpll_gpll_npll_npll_p, 0, + RK3368_CLKSEL_CON(23), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3368_CLKGATE_CON(5), 3, GFLAGS), + + COMPOSITE(SCLK_HDCP, "sclk_hdcp", mux_pll_src_cpll_gpll_npll_npll_p, 0, + RK3368_CLKSEL_CON(55), 6, 2, MFLAGS, 0, 6, DFLAGS, + RK3368_CLKGATE_CON(5), 5, GFLAGS), + + DIV(0, "pclk_pd_alive", "gpll", 0, + RK3368_CLKSEL_CON(10), 8, 5, DFLAGS), + + /* sclk_timer has a gate in the sgrf */ + + COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(10), 0, 5, DFLAGS, + RK3368_CLKGATE_CON(7), 9, GFLAGS), + GATE(SCLK_PVTM_PMU, "sclk_pvtm_pmu", "xin24m", 0, + RK3368_CLKGATE_CON(7), 3, GFLAGS), + COMPOSITE(0, "sclk_gpu_core_src", mux_pll_src_cpll_gpll_usb_npll_p, 0, + RK3368_CLKSEL_CON(14), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(4), 11, GFLAGS), + MUX(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(14), 14, 1, MFLAGS), + COMPOSITE_NOMUX(0, "aclk_gpu_mem_pre", "aclk_gpu_src", 0, + RK3368_CLKSEL_CON(14), 8, 5, DFLAGS, + RK3368_CLKGATE_CON(5), 8, GFLAGS), + COMPOSITE_NOMUX(0, "aclk_gpu_cfg_pre", "aclk_gpu_src", 0, + RK3368_CLKSEL_CON(16), 8, 5, DFLAGS, + RK3368_CLKGATE_CON(5), 9, GFLAGS), + GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, + RK3368_CLKGATE_CON(7), 11, GFLAGS), + + COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(9), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(3), 0, GFLAGS), + COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0, + RK3368_CLKSEL_CON(9), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RK3368_CLKGATE_CON(3), 3, GFLAGS), + COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED, + RK3368_CLKSEL_CON(9), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RK3368_CLKGATE_CON(3), 2, GFLAGS), + GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(3), 1, GFLAGS), + + GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS), + + /* + * Clock-Architecture Diagram 4 + */ + + COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(45), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(3), 7, GFLAGS), + COMPOSITE(SCLK_SPI1, "sclk_spi1", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(45), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3368_CLKGATE_CON(3), 8, GFLAGS), + COMPOSITE(SCLK_SPI2, "sclk_spi2", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(46), 15, 1, MFLAGS, 8, 7, DFLAGS, + RK3368_CLKGATE_CON(3), 9, GFLAGS), + + + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0, + RK3368_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(7), 12, GFLAGS), + COMPOSITE(SCLK_SDIO0, "sclk_sdio0", mux_mmc_src_p, 0, + RK3368_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(7), 13, GFLAGS), + COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0, + RK3368_CLKSEL_CON(51), 8, 2, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(7), 15, GFLAGS), + + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3368_SDMMC_CON0, 1), + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3368_SDMMC_CON1, 0), + + 
MMC(SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3368_SDIO0_CON0, 1), + MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3368_SDIO0_CON1, 0), + + MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3368_EMMC_CON0, 1), + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3368_EMMC_CON1, 0), + + GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(8), 1, GFLAGS), + + /* pmu_grf_soc_con0[6] allows to select between xin32k and pvtm_pmu */ + GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED, + RK3368_CLKGATE_CON(8), 4, GFLAGS), + + /* pmu_grf_soc_con0[6] allows to select between xin32k and pvtm_pmu */ + COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0, + RK3368_CLKSEL_CON(25), 0, 6, DFLAGS, + RK3368_CLKGATE_CON(3), 5, GFLAGS), + + COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0, + RK3368_CLKSEL_CON(25), 8, 8, DFLAGS, + RK3368_CLKGATE_CON(3), 6, GFLAGS), + + COMPOSITE(SCLK_NANDC0, "sclk_nandc0", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(47), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(7), 8, GFLAGS), + + COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(6), 7, GFLAGS), + + COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gpll_usb_usb_p, 0, + RK3368_CLKSEL_CON(33), 12, 2, MFLAGS, 0, 7, DFLAGS, + RK3368_CLKGATE_CON(2), 0, GFLAGS), + COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(34), 0, + RK3368_CLKGATE_CON(2), 1, GFLAGS), + MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(33), 8, 2, MFLAGS), + + COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0, + RK3368_CLKSEL_CON(35), 0, 7, DFLAGS, + RK3368_CLKGATE_CON(2), 2, GFLAGS), + COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(36), 0, + RK3368_CLKGATE_CON(2), 3, GFLAGS), + MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(35), 8, 2, MFLAGS), + + COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0, + RK3368_CLKSEL_CON(39), 0, 7, DFLAGS, + RK3368_CLKGATE_CON(2), 6, GFLAGS), + COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(40), 0, + RK3368_CLKGATE_CON(2), 7, GFLAGS), + MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(39), 8, 2, MFLAGS), + + COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0, + RK3368_CLKSEL_CON(41), 0, 7, DFLAGS, + RK3368_CLKGATE_CON(2), 8, GFLAGS), + COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(42), 0, + RK3368_CLKGATE_CON(2), 9, GFLAGS), + MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(41), 8, 2, MFLAGS), + + COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0, + RK3368_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3368_CLKGATE_CON(3), 4, GFLAGS), + MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT, + RK3368_CLKSEL_CON(43), 8, 1, MFLAGS), + GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0, + RK3368_CLKGATE_CON(7), 7, GFLAGS), + GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0, + RK3368_CLKGATE_CON(7), 6, GFLAGS), + GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0, + RK3368_CLKGATE_CON(7), 4, GFLAGS), + GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0, + RK3368_CLKGATE_CON(7), 5, GFLAGS), + + GATE(0, "jtag", "ext_jtag", 0, + RK3368_CLKGATE_CON(7), 0, GFLAGS), + + COMPOSITE_NODIV(0, "hsic_usbphy_480m", mux_hsic_usbphy480m_p, 0, + RK3368_CLKSEL_CON(26), 8, 2, MFLAGS, + RK3368_CLKGATE_CON(8), 0, GFLAGS), + 
COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, + RK3368_CLKSEL_CON(26), 12, 2, MFLAGS, + RK3368_CLKGATE_CON(8), 7, GFLAGS), + GATE(SCLK_HSICPHY12M, "sclk_hsicphy12m", "xin12m", 0, + RK3368_CLKGATE_CON(8), 6, GFLAGS), + + /* + * Clock-Architecture Diagram 5 + */ + + /* aclk_cci_pre gates */ + GATE(0, "aclk_core_niu_cpup", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 4, GFLAGS), + GATE(0, "aclk_core_niu_cci", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 3, GFLAGS), + GATE(0, "aclk_cci400", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 2, GFLAGS), + GATE(0, "aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 1, GFLAGS), + GATE(0, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 0, GFLAGS), + + /* aclkm_core_* gates */ + GATE(0, "aclk_adb400s_pd_core_b", "aclkm_core_b", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 0, GFLAGS), + GATE(0, "aclk_adb400s_pd_core_l", "aclkm_core_l", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 0, GFLAGS), + + /* armclk* gates */ + GATE(0, "sclk_dbg_pd_core_b", "armclkb", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 1, GFLAGS), + GATE(0, "sclk_dbg_pd_core_l", "armclkl", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 1, GFLAGS), + + /* sclk_cs_pre gates */ + GATE(0, "sclk_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 7, GFLAGS), + GATE(0, "pclk_core_niu_sdbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 6, GFLAGS), + GATE(0, "hclk_core_niu_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 5, GFLAGS), + + /* aclk_bus gates */ + GATE(0, "aclk_strc_sys", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 12, GFLAGS), + GATE(ACLK_DMAC_BUS, "aclk_dmac_bus", "aclk_bus", 0, RK3368_CLKGATE_CON(12), 11, GFLAGS), + GATE(0, "sclk_intmem1", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 6, GFLAGS), + GATE(0, "sclk_intmem0", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 5, GFLAGS), + GATE(0, "aclk_intmem", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 4, GFLAGS), + GATE(0, "aclk_gic400", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 9, GFLAGS), + + /* sclk_ddr gates */ + GATE(0, "nclk_ddrupctl", "sclk_ddr", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 2, GFLAGS), + + /* clk_hsadc_tsp is part of diagram2 */ + + /* fclk_mcu_src gates */ + GATE(0, "hclk_noc_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 14, GFLAGS), + GATE(0, "fclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 12, GFLAGS), + GATE(0, "hclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 11, GFLAGS), + + /* hclk_cpu gates */ + GATE(HCLK_SPDIF, "hclk_spdif", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 10, GFLAGS), + GATE(HCLK_ROM, "hclk_rom", "hclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 9, GFLAGS), + GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 8, GFLAGS), + GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 7, GFLAGS), + GATE(HCLK_TSP, "hclk_tsp", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 10, GFLAGS), + GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 4, GFLAGS), + GATE(MCLK_CRYPTO, "mclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 3, GFLAGS), + + /* pclk_cpu gates */ + GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 14, GFLAGS), + GATE(PCLK_DDRUPCTL, "pclk_ddrupctl", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 13, GFLAGS), + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 3, GFLAGS), + 
GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 2, GFLAGS), + GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 1, GFLAGS), + GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 0, GFLAGS), + GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS), + GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS), + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS), + GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS), + GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS), + + /* + * video clk gates + * aclk_video(_pre) can actually select between parents of aclk_vdpu + * and aclk_vepu by setting bit GRF_SOC_CON0[7]. + */ + GATE(ACLK_VIDEO, "aclk_video", "aclk_vdpu", 0, RK3368_CLKGATE_CON(15), 0, GFLAGS), + GATE(SCLK_HEVC_CABAC, "sclk_hevc_cabac", "sclk_hevc_cabac_src", 0, RK3368_CLKGATE_CON(15), 3, GFLAGS), + GATE(SCLK_HEVC_CORE, "sclk_hevc_core", "sclk_hevc_core_src", 0, RK3368_CLKGATE_CON(15), 2, GFLAGS), + GATE(HCLK_VIDEO, "hclk_video", "hclk_video_pre", 0, RK3368_CLKGATE_CON(15), 1, GFLAGS), + + /* aclk_rga_pre gates */ + GATE(ACLK_VIO1_NOC, "aclk_vio1_noc", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 10, GFLAGS), + GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(16), 0, GFLAGS), + GATE(ACLK_HDCP, "aclk_hdcp", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(17), 10, GFLAGS), + + /* aclk_vio0 gates */ + GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 11, GFLAGS), + GATE(ACLK_VIO0_NOC, "aclk_vio0_noc", "aclk_vio0", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 9, GFLAGS), + GATE(ACLK_VOP, "aclk_vop", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 5, GFLAGS), + GATE(ACLK_VOP_IEP, "aclk_vop_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 4, GFLAGS), + GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 2, GFLAGS), + + /* sclk_isp gates */ + GATE(HCLK_ISP, "hclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(16), 14, GFLAGS), + GATE(ACLK_ISP, "aclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(17), 0, GFLAGS), + + /* hclk_vio gates */ + GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 12, GFLAGS), + GATE(HCLK_VIO_NOC, "hclk_vio_noc", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 8, GFLAGS), + GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 7, GFLAGS), + GATE(HCLK_VOP, "hclk_vop", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 6, GFLAGS), + GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 3, GFLAGS), + GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 1, GFLAGS), + GATE(HCLK_VIO_HDCPMMU, "hclk_hdcpmmu", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 12, GFLAGS), + GATE(HCLK_VIO_H2P, "hclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 7, GFLAGS), + + /* + * pclk_vio gates + * pclk_vio comes from exactly the same source as hclk_vio + */ + GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 11, GFLAGS), + GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 9, GFLAGS), + GATE(PCLK_VIO_H2P, "pclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 8, GFLAGS), + GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 6, GFLAGS), + GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), + GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0,
RK3368_CLKGATE_CON(17), 3, GFLAGS), + + /* ext_vip gates in diagram3 */ + + /* gpu gates */ + GATE(SCLK_GPU_CORE, "sclk_gpu_core", "sclk_gpu_core_src", 0, RK3368_CLKGATE_CON(18), 2, GFLAGS), + GATE(ACLK_GPU_MEM, "aclk_gpu_mem", "aclk_gpu_mem_pre", 0, RK3368_CLKGATE_CON(18), 1, GFLAGS), + GATE(ACLK_GPU_CFG, "aclk_gpu_cfg", "aclk_gpu_cfg_pre", 0, RK3368_CLKGATE_CON(18), 0, GFLAGS), + + /* aclk_peri gates */ + GATE(ACLK_DMAC_PERI, "aclk_dmac_peri", "aclk_peri", 0, RK3368_CLKGATE_CON(19), 3, GFLAGS), + GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 2, GFLAGS), + GATE(HCLK_SFC, "hclk_sfc", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 15, GFLAGS), + GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 13, GFLAGS), + GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 8, GFLAGS), + GATE(ACLK_PERI_MMU, "aclk_peri_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(21), 4, GFLAGS), + + /* hclk_peri gates */ + GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 0, GFLAGS), + GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 11, GFLAGS), + GATE(0, "hclk_mmc_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 10, GFLAGS), + GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 9, GFLAGS), + GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 7, GFLAGS), + GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 6, GFLAGS), + GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 5, GFLAGS), + GATE(HCLK_HOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 4, GFLAGS), + GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 3, GFLAGS), + GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 2, GFLAGS), + GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 1, GFLAGS), + GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 3, GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 2, GFLAGS), + GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 1, GFLAGS), + GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 0, GFLAGS), + + /* pclk_peri gates */ + GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 15, GFLAGS), + GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 14, GFLAGS), + GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 13, GFLAGS), + GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 12, GFLAGS), + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 11, GFLAGS), + GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 10, GFLAGS), + GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 9, GFLAGS), + GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 8, GFLAGS), + GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 7, GFLAGS), + GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 6, GFLAGS), + GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 5, GFLAGS), + GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 4, GFLAGS), + GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 1, GFLAGS), + GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, 
RK3368_CLKGATE_CON(20), 14, GFLAGS), + GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), + + /* pclk_pd_alive gates */ + GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), + GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), + GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), + GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), + GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), + GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), + GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), + + /* + * pclk_vio gates + * pclk_vio comes from exactly the same source as hclk_vio + */ + GATE(0, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), + GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), + + /* pclk_pd_pmu gates */ + GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), + GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), + GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), + GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), + GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), + GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), + + /* timer gates */ + GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), + GATE(0, "sclk_timer14", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 10, GFLAGS), + GATE(0, "sclk_timer13", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 9, GFLAGS), + GATE(0, "sclk_timer12", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 8, GFLAGS), + GATE(0, "sclk_timer11", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 7, GFLAGS), + GATE(0, "sclk_timer10", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 6, GFLAGS), + GATE(0, "sclk_timer05", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 5, GFLAGS), + GATE(0, "sclk_timer04", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 4, GFLAGS), + GATE(0, "sclk_timer03", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 3, GFLAGS), + GATE(0, "sclk_timer02", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 2, GFLAGS), + GATE(0, "sclk_timer01", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 1, GFLAGS), + GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS), +}; + +static const char *const rk3368_critical_clocks[] __initconst = { + "pclk_pd_pmu", +}; + +static void __init rk3368_clk_init(struct device_node *np) +{ + void __iomem *reg_base; + struct clk *clk; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: could not map cru region\n", __func__); + return; + } + + rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + + /* xin12m is created by a cru-internal divider */ + clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2); + if (IS_ERR(clk)) + pr_warn("%s: could not register clock xin12m: %ld\n", + __func__, PTR_ERR(clk)); + + /* ddrphy_div4 is created by a cru-internal divider */ + clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4); + if
(IS_ERR(clk)) + pr_warn("%s: could not register clock ddrphy_div4: %ld\n", + __func__, PTR_ERR(clk)); + + clk = clk_register_fixed_factor(NULL, "hclk_video_pre", + "hclk_video_pre_v", 0, 1, 4); + if (IS_ERR(clk)) + pr_warn("%s: could not register clock hclk_video_pre: %ld\n", + __func__, PTR_ERR(clk)); + + /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */ + clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1); + if (IS_ERR(clk)) + pr_warn("%s: could not register clock pclk_wdt: %ld\n", + __func__, PTR_ERR(clk)); + else + rockchip_clk_add_lookup(clk, PCLK_WDT); + + rockchip_clk_register_plls(rk3368_pll_clks, + ARRAY_SIZE(rk3368_pll_clks), + RK3368_GRF_SOC_STATUS0); + rockchip_clk_register_branches(rk3368_clk_branches, + ARRAY_SIZE(rk3368_clk_branches)); + rockchip_clk_protect_critical(rk3368_critical_clocks, + ARRAY_SIZE(rk3368_critical_clocks)); + + rockchip_clk_register_armclk(ARMCLKB, "armclkb", + mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p), + &rk3368_cpuclkb_data, rk3368_cpuclkb_rates, + ARRAY_SIZE(rk3368_cpuclkb_rates)); + + rockchip_clk_register_armclk(ARMCLKL, "armclkl", + mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p), + &rk3368_cpuclkl_data, rk3368_cpuclkl_rates, + ARRAY_SIZE(rk3368_cpuclkl_rates)); + + rockchip_register_softrst(np, 15, reg_base + RK3368_SOFTRST_CON(0), + ROCKCHIP_SOFTRST_HIWORD_MASK); + + rockchip_register_restart_notifier(RK3368_GLB_SRST_FST); +} +CLK_OF_DECLARE(rk3368_cru, "rockchip,rk3368-cru", rk3368_clk_init); diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 052b94db0..249388156 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -277,6 +277,13 @@ void __init rockchip_clk_register_branches( list->div_shift ); break; + case branch_inverter: + clk = rockchip_clk_register_inverter( + list->name, list->parent_names, + list->num_parents, + reg_base + list->muxdiv_offset, + list->div_shift, list->div_flags, &clk_lock); + break; } /* none of the cases above matched */ diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 6b0926730..dc8ecb267 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -24,29 +24,29 @@ #define CLK_ROCKCHIP_CLK_H #include -#include -#include + +struct clk; #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) /* register positions shared by RK2928, RK3066 and RK3188 */ -#define RK2928_PLL_CON(x) (x * 0x4) +#define RK2928_PLL_CON(x) ((x) * 0x4) #define RK2928_MODE_CON 0x40 -#define RK2928_CLKSEL_CON(x) (x * 0x4 + 0x44) -#define RK2928_CLKGATE_CON(x) (x * 0x4 + 0xd0) +#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44) +#define RK2928_CLKGATE_CON(x) ((x) * 0x4 + 0xd0) #define RK2928_GLB_SRST_FST 0x100 #define RK2928_GLB_SRST_SND 0x104 -#define RK2928_SOFTRST_CON(x) (x * 0x4 + 0x110) +#define RK2928_SOFTRST_CON(x) ((x) * 0x4 + 0x110) #define RK2928_MISC_CON 0x134 #define RK3288_PLL_CON(x) RK2928_PLL_CON(x) #define RK3288_MODE_CON 0x50 -#define RK3288_CLKSEL_CON(x) (x * 0x4 + 0x60) -#define RK3288_CLKGATE_CON(x) (x * 0x4 + 0x160) +#define RK3288_CLKSEL_CON(x) ((x) * 0x4 + 0x60) +#define RK3288_CLKGATE_CON(x) ((x) * 0x4 + 0x160) #define RK3288_GLB_SRST_FST 0x1b0 #define RK3288_GLB_SRST_SND 0x1b4 -#define RK3288_SOFTRST_CON(x) (x * 0x4 + 0x1b8) +#define RK3288_SOFTRST_CON(x) ((x) * 0x4 + 0x1b8) #define RK3288_MISC_CON 0x1e8 #define RK3288_SDMMC_CON0 0x200 #define RK3288_SDMMC_CON1 0x204 @@ -57,6 +57,22 @@ #define RK3288_EMMC_CON0 0x218 #define RK3288_EMMC_CON1 0x21c +#define RK3368_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3368_CLKSEL_CON(x) ((x) * 0x4 + 0x100) +#define RK3368_CLKGATE_CON(x) ((x) * 0x4 + 0x200) +#define RK3368_GLB_SRST_FST 0x280 +#define RK3368_GLB_SRST_SND 0x284 +#define RK3368_SOFTRST_CON(x) ((x) * 0x4 + 0x300) +#define RK3368_MISC_CON 0x380 +#define RK3368_SDMMC_CON0 0x400 +#define RK3368_SDMMC_CON1 0x404 +#define RK3368_SDIO0_CON0 0x408 +#define RK3368_SDIO0_CON1 0x40c +#define RK3368_SDIO1_CON0 0x410 +#define RK3368_SDIO1_CON1 0x414 +#define RK3368_EMMC_CON0 0x418 +#define RK3368_EMMC_CON1 0x41c + enum rockchip_pll_type { pll_rk3066, }; @@ -67,16 +83,16 @@ enum rockchip_pll_type { .nr = _nr, \ .nf = _nf, \ .no = _no, \ - .bwadj = (_nf >> 1), \ + .nb = ((_nf) < 2) ? 1 : (_nf) >> 1, \ } -#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw) \ +#define RK3066_PLL_RATE_NB(_rate, _nr, _nf, _no, _nb) \ { \ .rate = _rate##U, \ .nr = _nr, \ .nf = _nf, \ .no = _no, \ - .bwadj = _bw, \ + .nb = _nb, \ } struct rockchip_pll_rate_table { @@ -84,7 +100,7 @@ struct rockchip_pll_rate_table { unsigned int nr; unsigned int nf; unsigned int no; - unsigned int bwadj; + unsigned int nb; }; /** @@ -182,6 +198,13 @@ struct clk *rockchip_clk_register_mmc(const char *name, const char *const *parent_names, u8 num_parents, void __iomem *reg, int shift); +#define ROCKCHIP_INVERTER_HIWORD_MASK BIT(0) + +struct clk *rockchip_clk_register_inverter(const char *name, + const char *const *parent_names, u8 num_parents, + void __iomem *reg, int shift, int flags, + spinlock_t *lock); + #define PNAME(x) static const char *const x[] __initconst enum rockchip_clk_branch_type { @@ -191,6 +214,7 @@ enum rockchip_clk_branch_type { branch_fraction_divider, branch_gate, branch_mmc, + branch_inverter, }; struct rockchip_clk_branch { @@ -308,6 +332,26 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +#define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms, \ + mw, mf, ds, dw, df, dt) \ + { \ + .id = _id, \ + .branch_type = branch_composite, \ + .name = cname, \ + .parent_names = pnames, \ + .num_parents = ARRAY_SIZE(pnames), \ + .flags = f, \ + .muxdiv_offset = mo, \ + .mux_shift = ms, \ + .mux_width = mw, \ + .mux_flags = mf, \ + .div_shift = ds, \ + .div_width = dw, \ + .div_flags = df, \ + .div_table = dt, \ + .gate_offset = -1, \ + } + #define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\ { \ .id = _id, \ @@ -394,6 +438,18 @@ struct rockchip_clk_branch { .div_shift = shift, \ } +#define INVERTER(_id, cname, pname, io, is, if) \ + { \ + .id = _id, \ + .branch_type = branch_inverter, \ + .name = cname, \ + .parent_names = (const char *[]){ pname }, \ + .num_parents = 1, \ + .muxdiv_offset = io, \ + .div_shift = is, \ + .div_flags = if, \ + } + void rockchip_clk_init(struct device_node *np, void __iomem *base, unsigned long nr_clks); struct regmap *rockchip_clk_get_grf(void); diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index dd02356e2..2fe37f708 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -33,6 +33,9 @@ */ #include +#include +#include +#include #include "clk-cpu.h" #define E4210_SRC_CPU 0x0 @@ -97,8 +100,8 @@ static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos, static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate, unsigned long *prate) { - struct clk *parent = __clk_get_parent(hw->clk); - *prate = __clk_round_rate(parent, drate); + struct clk_hw *parent = clk_hw_get_parent(hw); + *prate = clk_hw_round_rate(parent, drate); return *prate; } diff --git 
a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 454b02ae4..4e9584d79 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -9,8 +9,9 @@ * Common Clock Framework support for Audio Subsystem Clock Controller. */ -#include +#include #include +#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 03a52228b..7cd02ff37 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -9,8 +9,8 @@ * Clock driver for Exynos clock output */ +#include #include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 538de66a7..fdd41b17a 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -8,8 +8,6 @@ * Common Clock Framework support for Exynos3250 SoC. */ -#include -#include #include #include #include @@ -19,6 +17,7 @@ #include #include "clk.h" +#include "clk-cpu.h" #include "clk-pll.h" #define SRC_LEFTBUS 0x4200 @@ -319,8 +318,10 @@ static struct samsung_mux_clock mux_clks[] __initdata = { MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p, SRC_CPU, 24, 1), MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1), - MUX(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1), - MUX(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), + MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, + CLK_SET_RATE_PARENT, 0), + MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, + CLK_SET_RATE_PARENT, 0), }; static struct samsung_div_clock div_clks[] __initdata = { @@ -772,6 +773,26 @@ static struct samsung_cmu_info cmu_info __initdata = { .nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs), }; +#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \ + (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \ + ((corem) << 4)) +#define E3250_CPU_DIV1(hpm, copy) \ + (((hpm) << 4) | ((copy) << 0)) + +static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = { + { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), }, + { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), }, + { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), }, + { 0 }, +}; + static void __init exynos3250_cmu_init(struct device_node *np) { struct samsung_clk_provider *ctx; @@ -780,6 +801,11 @@ static void __init exynos3250_cmu_init(struct device_node *np) if (!ctx) return; + exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", + mout_core_p[0], mout_core_p[1], 0x14200, + e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d), + CLK_CPU_HAS_DIV1); + exynos3_core_down_clock(ctx->reg_base); } CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index d1af2fc53..7f370d3e0 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -11,8 +11,8 @@ */ #include +#include #include -#include 
#include #include #include @@ -1398,6 +1398,45 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = { { 0 }, }; +static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = { + { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), }, + { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), }, + { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), }, + { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), }, + { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), }, + { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), }, + { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), }, + { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), }, + { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), }, + { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), }, + { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), }, + { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), }, + { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), }, + { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), }, + { 0 }, +}; + +#define E4412_CPU_DIV1(cores, hpm, copy) \ + (((cores) << 8) | ((hpm) << 4) | ((copy) << 0)) + +static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = { + { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(7, 0, 6), }, + { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(6, 0, 6), }, + { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(6, 0, 5), }, + { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(5, 0, 5), }, + { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4412_CPU_DIV1(5, 0, 4), }, + { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4412_CPU_DIV1(4, 0, 4), }, + { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(4, 0, 3), }, + { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(3, 0, 3), }, + { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(3, 0, 3), }, + { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), }, + { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), }, + { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), }, + { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), }, + { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4412_CPU_DIV1(0, 0, 3), }, + { 0 }, +}; + /* register exynos4 clocks */ static void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc soc) @@ -1491,6 +1530,17 @@ static void __init exynos4_clk_init(struct device_node *np, samsung_clk_register_fixed_factor(ctx, exynos4x12_fixed_factor_clks, ARRAY_SIZE(exynos4x12_fixed_factor_clks)); + if (of_machine_is_compatible("samsung,exynos4412")) { + exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", + mout_core_p4x12[0], mout_core_p4x12[1], 0x14200, + e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d), + CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1); + } else { + exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", + mout_core_p4x12[0], mout_core_p4x12[1], 0x14200, + e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d), + CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1); + } } samsung_clk_register_alias(ctx, exynos4_aliases, diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c index 6c78b09c8..92c39f6ef 100644 --- a/drivers/clk/samsung/clk-exynos4415.c +++ b/drivers/clk/samsung/clk-exynos4415.c @@ -9,8 +9,6 @@ * Common Clock Framework support 
for Exynos4415 SoC. */ -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 70ec3d260..55b83c7ef 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -11,14 +11,13 @@ */ #include -#include -#include #include #include #include #include #include "clk.h" +#include "clk-cpu.h" #define APLL_LOCK 0x0 #define APLL_CON0 0x100 @@ -748,6 +747,32 @@ static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = { VPLL_LOCK, VPLL_CON0, NULL), }; +#define E5250_CPU_DIV0(apll, pclk_dbg, atb, periph, acp, cpud) \ + ((((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \ + ((periph) << 12) | ((acp) << 8) | ((cpud) << 4))) +#define E5250_CPU_DIV1(hpm, copy) \ + (((hpm) << 4) | (copy)) + +static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = { + { 1700000, E5250_CPU_DIV0(5, 3, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), }, + { 1600000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), }, + { 1500000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 2), E5250_CPU_DIV1(2, 0), }, + { 1400000, E5250_CPU_DIV0(4, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), }, + { 1300000, E5250_CPU_DIV0(3, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), }, + { 1200000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 2), E5250_CPU_DIV1(2, 0), }, + { 1100000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 3), E5250_CPU_DIV1(2, 0), }, + { 1000000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 900000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 800000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 700000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 600000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 500000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 400000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 300000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 200000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), }, + { 0 }, +}; + static const struct of_device_id ext_clk_match[] __initconst = { { .compatible = "samsung,clock-xxti", .data = (void *)0, }, { }, @@ -797,6 +822,10 @@ static void __init exynos5250_clk_init(struct device_node *np) ARRAY_SIZE(exynos5250_div_clks)); samsung_clk_register_gate(ctx, exynos5250_gate_clks, ARRAY_SIZE(exynos5250_gate_clks)); + exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", + mout_cpu_p[0], mout_cpu_p[1], 0x200, + exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d), + CLK_CPU_HAS_DIV1); /* * Enable arm clock down (in idle) and set arm divider diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index 06f96eb7c..d1a29f6c1 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -9,8 +9,6 @@ * Common Clock Framework support for Exynos5260 SoC. 
*/ -#include -#include #include #include diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c index 231475bc2..d5d5dcabc 100644 --- a/drivers/clk/samsung/clk-exynos5410.c +++ b/drivers/clk/samsung/clk-exynos5410.c @@ -11,8 +11,6 @@ #include -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index a1d731ca8..389af3c15 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -11,8 +11,7 @@ */ #include -#include -#include +#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 39c95649d..cee062c58 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -9,8 +9,6 @@ * Common Clock Framework support for Exynos5443 SoC. */ -#include -#include #include #include diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c index 979e81389..590813871 100644 --- a/drivers/clk/samsung/clk-exynos5440.c +++ b/drivers/clk/samsung/clk-exynos5440.c @@ -10,8 +10,6 @@ */ #include -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index 03d36e847..8524e6670 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -8,8 +8,6 @@ * */ -#include -#include #include #include diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index bebc61b5f..b7dd39610 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "clk.h" #include "clk-pll.h" @@ -180,7 +182,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -288,7 +290,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -403,7 +405,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -455,7 +457,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate, if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) { pr_err("%s: could not lock PLL %s\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return -EFAULT; } @@ -554,7 +556,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -614,7 +616,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate, if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) { pr_err("%s: could not lock PLL %s\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return -EFAULT; } @@ -772,7 +774,7 @@ static int 
samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -1013,7 +1015,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } @@ -1111,7 +1113,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate, rate = samsung_get_pll_settings(pll, drate); if (!rate) { pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, - drate, __clk_get_name(hw->clk)); + drate, clk_hw_get_name(hw)); return -EINVAL; } diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c index e56df5064..e9eb935d7 100644 --- a/drivers/clk/samsung/clk-s3c2410-dclk.c +++ b/drivers/clk/samsung/clk-s3c2410-dclk.c @@ -8,6 +8,10 @@ * Common Clock Framework support for s3c24xx external clock output. */ +#include +#include +#include +#include #include #include #include "clk.h" @@ -57,7 +61,7 @@ struct s3c24xx_clkout { static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw) { struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 val; val = readl_relaxed(S3C24XX_MISCCR) >> clkout->shift; diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index 5d2f03461..0945a8852 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -8,8 +8,6 @@ * Common Clock Framework support for S3C2410 and following SoCs. */ -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c index 2ceedaf8c..44d6a9f4f 100644 --- a/drivers/clk/samsung/clk-s3c2412.c +++ b/drivers/clk/samsung/clk-s3c2412.c @@ -8,8 +8,6 @@ * Common Clock Framework support for S3C2412 and S3C2413. */ -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index 0c3c182b9..2c0a1ea3c 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ b/drivers/clk/samsung/clk-s3c2443.c @@ -8,8 +8,6 @@ * Common Clock Framework support for S3C2443 and following SoCs. */ -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 0f590e555..d325ed1e1 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -8,8 +8,7 @@ * Common Clock Framework support for all S3C64xx SoCs. */ -#include -#include +#include #include #include #include diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c index de4455b75..eefb84b22 100644 --- a/drivers/clk/samsung/clk-s5pv210-audss.c +++ b/drivers/clk/samsung/clk-s5pv210-audss.c @@ -13,8 +13,8 @@ * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs. */ -#include #include +#include #include #include #include diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c index 793cb1d2f..759aaf342 100644 --- a/drivers/clk/samsung/clk-s5pv210.c +++ b/drivers/clk/samsung/clk-s5pv210.c @@ -11,8 +11,6 @@ * Common Clock Framework support for all S5PC110/S5PV210 SoCs. 
*/ -#include -#include #include #include #include diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 011723839..f38a6c49f 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -11,6 +11,10 @@ * clock framework for Samsung platforms. */ +#include +#include +#include +#include #include #include diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index b775fc29c..aa872d2c5 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -13,10 +13,11 @@ #ifndef __SAMSUNG_CLK_H #define __SAMSUNG_CLK_H -#include #include #include "clk-pll.h" +struct clk; + /** * struct samsung_clk_provider: information about clock provider * @reg_base: virtual address for the register base. diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c index 036a692c7..b4c8d6746 100644 --- a/drivers/clk/shmobile/clk-div6.c +++ b/drivers/clk/shmobile/clk-div6.c @@ -11,12 +11,12 @@ */ #include -#include #include #include #include #include #include +#include #define CPG_DIV6_CKSTP BIT(8) #define CPG_DIV6_DIV(d) ((d) & 0x3f) @@ -133,13 +133,13 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw) hw_index = (clk_readl(clock->reg) >> clock->src_shift) & (BIT(clock->src_width) - 1); - for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { if (clock->parents[i] == hw_index) return i; } pr_err("%s: %s DIV6 clock set to invalid parent %u\n", - __func__, __clk_get_name(hw->clk), hw_index); + __func__, clk_hw_get_name(hw), hw_index); return 0; } @@ -149,7 +149,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index) u8 hw_index; u32 mask; - if (index >= __clk_get_num_parents(hw->clk)) + if (index >= clk_hw_get_num_parents(hw)) return -EINVAL; mask = ~((BIT(clock->src_width) - 1) << clock->src_shift); diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c index 5b60beb7d..a91825471 100644 --- a/drivers/clk/shmobile/clk-emev2.c +++ b/drivers/clk/shmobile/clk-emev2.c @@ -28,6 +28,8 @@ #define USIBU1_RSTCTRL 0x0ac #define USIBU2_RSTCTRL 0x0b0 #define USIBU3_RSTCTRL 0x0b4 +#define IIC0_RSTCTRL 0x0dc +#define IIC1_RSTCTRL 0x0e0 #define STI_RSTCTRL 0x124 #define STI_CLKSEL 0x688 @@ -66,6 +68,10 @@ static void __init emev2_smu_init(void) emev2_smu_write(2, USIBU1_RSTCTRL); emev2_smu_write(2, USIBU2_RSTCTRL); emev2_smu_write(2, USIBU3_RSTCTRL); + + /* deassert reset for IIC0->IIC1 */ + emev2_smu_write(1, IIC0_RSTCTRL); + emev2_smu_write(1, IIC1_RSTCTRL); } static void __init emev2_smu_clkdiv_init(struct device_node *np) diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c index 2d2fe773a..b1df7b2f1 100644 --- a/drivers/clk/shmobile/clk-mstp.c +++ b/drivers/clk/shmobile/clk-mstp.c @@ -2,6 +2,7 @@ * R-Car MSTP clocks * * Copyright (C) 2013 Ideas On Board SPRL + * Copyright (C) 2015 Glider bvba * * Contact: Laurent Pinchart * @@ -10,11 +11,16 @@ * the Free Software Foundation; version 2 of the License. 
*/ +#include #include #include +#include +#include #include #include #include +#include +#include #include /* @@ -236,3 +242,84 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) of_clk_add_provider(np, of_clk_src_onecell_get, &group->data); } CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init); + + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev) +{ + struct device_node *np = dev->of_node; + struct of_phandle_args clkspec; + struct clk *clk; + int i = 0; + int error; + + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + &clkspec)) { + if (of_device_is_compatible(clkspec.np, + "renesas,cpg-mstp-clocks")) + goto found; + + of_node_put(clkspec.np); + i++; + } + + return 0; + +found: + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + + if (IS_ERR(clk)) + return PTR_ERR(clk); + + error = pm_clk_create(dev); + if (error) { + dev_err(dev, "pm_clk_create failed %d\n", error); + goto fail_put; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, error); + goto fail_destroy; + } + + return 0; + +fail_destroy: + pm_clk_destroy(dev); +fail_put: + clk_put(clk); + return error; +} + +void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev) +{ + if (!list_empty(&dev->power.subsys_data->clock_list)) + pm_clk_destroy(dev); +} + +void __init cpg_mstp_add_clk_domain(struct device_node *np) +{ + struct generic_pm_domain *pd; + u32 ncells; + + if (of_property_read_u32(np, "#power-domain-cells", &ncells)) { + pr_warn("%s lacks #power-domain-cells\n", np->full_name); + return; + } + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return; + + pd->name = np->name; + + pd->flags = GENPD_FLAG_PM_CLK; + pm_genpd_init(pd, &simple_qos_governor, false); + pd->attach_dev = cpg_mstp_attach_dev; + pd->detach_dev = cpg_mstp_detach_dev; + + of_genpd_add_provider_simple(np, pd); +} +#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */ diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c index 29b9a0b00..9326204be 100644 --- a/drivers/clk/shmobile/clk-r8a73a4.c +++ b/drivers/clk/shmobile/clk-r8a73a4.c @@ -9,10 +9,10 @@ */ #include -#include #include #include #include +#include #include #include #include diff --git a/drivers/clk/shmobile/clk-r8a7740.c b/drivers/clk/shmobile/clk-r8a7740.c index 1e2eaae21..1e6b1da58 100644 --- a/drivers/clk/shmobile/clk-r8a7740.c +++ b/drivers/clk/shmobile/clk-r8a7740.c @@ -9,10 +9,10 @@ */ #include -#include #include #include #include +#include #include #include #include diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/shmobile/clk-r8a7778.c index cb33b5727..87c1d2f2f 100644 --- a/drivers/clk/shmobile/clk-r8a7778.c +++ b/drivers/clk/shmobile/clk-r8a7778.c @@ -9,9 +9,9 @@ */ #include -#include #include #include +#include struct r8a7778_cpg { struct clk_onecell_data data; @@ -124,6 +124,8 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np) } of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); + + cpg_mstp_add_clk_domain(np); } CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks", diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/shmobile/clk-r8a7779.c index 652ecacb6..92275c5f2 100644 --- a/drivers/clk/shmobile/clk-r8a7779.c +++ b/drivers/clk/shmobile/clk-r8a7779.c @@ -11,12 +11,12 @@ */ #include -#include #include #include #include #include #include +#include #include #include @@ 
-168,6 +168,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np) } of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); + + cpg_mstp_add_clk_domain(np); } CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks", r8a7779_cpg_clocks_init); diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index acfb6d7db..745496f7e 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -11,13 +11,13 @@ */ #include -#include #include #include #include #include #include #include +#include #include struct rcar_gen2_cpg { @@ -415,6 +415,8 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np) } of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); + + cpg_mstp_add_clk_domain(np); } CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks", rcar_gen2_cpg_clocks_init); diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c index 7e68e8630..9766e3cb5 100644 --- a/drivers/clk/shmobile/clk-rz.c +++ b/drivers/clk/shmobile/clk-rz.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -99,5 +100,7 @@ static void __init rz_cpg_clocks_init(struct device_node *np) } of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); + + cpg_mstp_add_clk_domain(np); } CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init); diff --git a/drivers/clk/shmobile/clk-sh73a0.c b/drivers/clk/shmobile/clk-sh73a0.c index cd529cfe4..8966f8bbf 100644 --- a/drivers/clk/shmobile/clk-sh73a0.c +++ b/drivers/clk/shmobile/clk-sh73a0.c @@ -9,12 +9,12 @@ */ #include -#include #include #include #include #include #include +#include #include struct sh73a0_cpg { diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index d63b76ca6..c5eaa9d16 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c index db8ab691d..a98e21fe7 100644 --- a/drivers/clk/sirf/clk-atlas7.c +++ b/drivers/clk/sirf/clk-atlas7.c @@ -358,6 +358,7 @@ static unsigned long pll_clk_recalc_rate(struct clk_hw *hw, if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) { rate = fin; rate *= 1 << 24; + do_div(rate, nr); do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth) + (ssmod << ssdepth))); } else { @@ -465,6 +466,9 @@ static struct clk_pll clk_sys3pll = { * double resolution mode:fout = fin * finc / 2^29 * normal mode:fout = fin * finc / 2^28 */ +#define DTO_RESL_DOUBLE (1ULL << 29) +#define DTO_RESL_NORMAL (1ULL << 28) + static int dto_clk_is_enabled(struct clk_hw *hw) { struct clk_dto *clk = to_dtoclk(hw); @@ -509,9 +513,9 @@ static unsigned long dto_clk_recalc_rate(struct clk_hw *hw, rate *= finc; if (droff & BIT(0)) /* Double resolution off */ - do_div(rate, 1 << 28); + do_div(rate, DTO_RESL_NORMAL); else - do_div(rate, 1 << 29); + do_div(rate, DTO_RESL_DOUBLE); return rate; } @@ -519,11 +523,11 @@ static unsigned long dto_clk_recalc_rate(struct clk_hw *hw, static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { - u64 dividend = rate * (1 << 29); + u64 dividend = rate * DTO_RESL_DOUBLE; do_div(dividend, *parent_rate); dividend *= *parent_rate; - do_div(dividend, 1 << 29); + do_div(dividend, DTO_RESL_DOUBLE); return dividend; } @@ -531,7 +535,7 @@ static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate, static int dto_clk_set_rate(struct clk_hw *hw, 
unsigned long rate, unsigned long parent_rate) { - u64 dividend = rate * (1 << 29); + u64 dividend = rate * DTO_RESL_DOUBLE; struct clk_dto *clk = to_dtoclk(hw); do_div(dividend, parent_rate); @@ -1161,7 +1165,7 @@ static struct atlas7_unit_init_data unit_list[] __initdata = { { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock }, { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock }, { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock }, - { 125, "thcpum_cpudiv4", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock }, + { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock }, { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock }, { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock }, { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock }, @@ -1174,9 +1178,13 @@ static struct atlas7_unit_init_data unit_list[] __initdata = { { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock }, { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock }, { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock }, + { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, &leaf0_gate_lock }, + { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, &leaf0_gate_lock }, + { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, &leaf0_gate_lock }, + { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, &leaf0_gate_lock }, }; -static struct clk *atlas7_clks[ARRAY_SIZE(unit_list)]; +static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)]; static int unit_clk_is_enabled(struct clk_hw *hw) { @@ -1609,6 +1617,7 @@ static void __init atlas7_clk_init(struct device_node *np) sirfsoc_clk_vbase + mux->mux_offset, mux->shift, mux->width, mux->mux_flags, NULL); + atlas7_clks[ARRAY_SIZE(unit_list) + i] = clk; BUG_ON(!clk); } @@ -1620,7 +1629,7 @@ static void __init atlas7_clk_init(struct device_node *np) } clk_data.clks = atlas7_clks; - clk_data.clk_num = ARRAY_SIZE(unit_list); + clk_data.clk_num = ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list); ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); BUG_ON(ret); diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index 9fc285d78..77e1e2491 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -7,6 +7,8 @@ * Licensed under GPLv2 or later. */ +#include + #define KHZ 1000 #define MHZ (KHZ * KHZ) @@ -165,10 +167,10 @@ static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate, * SiRF SoC has not cpu clock control, * So bypass to it's parent pll. 
*/ - struct clk *parent_clk = clk_get_parent(hw->clk); - struct clk *pll_parent_clk = clk_get_parent(parent_clk); - unsigned long pll_parent_rate = clk_get_rate(pll_parent_clk); - return pll_clk_round_rate(__clk_get_hw(parent_clk), rate, &pll_parent_rate); + struct clk_hw *parent_clk = clk_hw_get_parent(hw); + struct clk_hw *pll_parent_clk = clk_hw_get_parent(parent_clk); + unsigned long pll_parent_rate = clk_hw_get_rate(pll_parent_clk); + return pll_clk_round_rate(parent_clk, rate, &pll_parent_rate); } static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw, @@ -178,8 +180,8 @@ static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw, * SiRF SoC has not cpu clock control, * So return the parent pll rate. */ - struct clk *parent_clk = clk_get_parent(hw->clk); - return __clk_get_rate(parent_clk); + struct clk_hw *parent_clk = clk_hw_get_parent(hw); + return clk_hw_get_rate(parent_clk); } static struct clk_ops std_pll_ops = { diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c index 6968e2ebc..f92c40264 100644 --- a/drivers/clk/sirf/clk-prima2.c +++ b/drivers/clk/sirf/clk-prima2.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c index 83c6780ff..1cebf253e 100644 --- a/drivers/clk/socfpga/clk-gate-a10.c +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -13,6 +13,7 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see . */ +#include #include #include #include @@ -38,7 +39,7 @@ static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk, div = socfpgaclk->fixed_div; else if (socfpgaclk->div_reg) { val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; - val &= div_mask(socfpgaclk->width); + val &= GENMASK(socfpgaclk->width - 1, 0); div = (1 << val); } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 82449cd76..aa7a6e6a1 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -15,8 +15,7 @@ * Based from clk-highbank.c * */ -#include -#include +#include #include #include #include @@ -106,7 +105,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, div = socfpgaclk->fixed_div; else if (socfpgaclk->div_reg) { val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; - val &= div_mask(socfpgaclk->width); + val &= GENMASK(socfpgaclk->width - 1, 0); /* Check for GPIO_DB_CLK by its offset */ if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) div = val + 1; diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c index 9d0181b5a..1f397cb72 100644 --- a/drivers/clk/socfpga/clk-periph-a10.c +++ b/drivers/clk/socfpga/clk-periph-a10.c @@ -13,6 +13,7 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see . 
*/ +#include #include #include #include @@ -37,7 +38,7 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, div = socfpgaclk->fixed_div; } else if (socfpgaclk->div_reg) { div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; - div &= div_mask(socfpgaclk->width); + div &= GENMASK(socfpgaclk->width - 1, 0); div += 1; } else { div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c index 83aeaa219..52c883ea7 100644 --- a/drivers/clk/socfpga/clk-periph.c +++ b/drivers/clk/socfpga/clk-periph.c @@ -15,8 +15,7 @@ * Based from clk-highbank.c * */ -#include -#include +#include #include #include #include @@ -36,7 +35,7 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, } else { if (socfpgaclk->div_reg) { val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; - val &= div_mask(socfpgaclk->width); + val &= GENMASK(socfpgaclk->width - 1, 0); parent_rate /= (val + 1); } div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1); @@ -45,8 +44,17 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, return parent_rate / div; } +static u8 clk_periclk_get_parent(struct clk_hw *hwclk) +{ + u32 clk_src; + + clk_src = readl(clk_mgr_base_addr + CLKMGR_DBCTRL); + return clk_src & 0x1; +} + static const struct clk_ops periclk_ops = { .recalc_rate = clk_periclk_recalc_rate, + .get_parent = clk_periclk_get_parent, }; static __init void __socfpga_periph_init(struct device_node *node, @@ -56,7 +64,7 @@ static __init void __socfpga_periph_init(struct device_node *node, struct clk *clk; struct socfpga_periph_clk *periph_clk; const char *clk_name = node->name; - const char *parent_name; + const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; int rc; u32 fixed_div; @@ -90,9 +98,10 @@ static __init void __socfpga_periph_init(struct device_node *node, init.name = clk_name; init.ops = ops; init.flags = 0; - parent_name = of_clk_get_parent_name(node, 0); - init.parent_names = &parent_name; - init.num_parents = 1; + + init.num_parents = of_clk_parent_fill(node, parent_name, + SOCFPGA_MAX_PARENTS); + init.parent_names = parent_name; periph_clk->hw.hw.init = &init; diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c index 1178b11ba..402d630bd 100644 --- a/drivers/clk/socfpga/clk-pll-a10.c +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -13,6 +13,7 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see . 
*/ +#include #include #include #include diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index 8f26b5234..c7f463172 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -15,8 +15,7 @@ * Based from clk-highbank.c * */ -#include -#include +#include #include #include #include diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h index 603973ab7..814c7247b 100644 --- a/drivers/clk/socfpga/clk.h +++ b/drivers/clk/socfpga/clk.h @@ -18,16 +18,15 @@ #define __SOCFPGA_CLK_H #include -#include /* Clock Manager offsets */ #define CLKMGR_CTRL 0x0 #define CLKMGR_BYPASS 0x4 +#define CLKMGR_DBCTRL 0x10 #define CLKMGR_L4SRC 0x70 #define CLKMGR_PERPLL_SRC 0xAC #define SOCFPGA_MAX_PARENTS 5 -#define div_mask(width) ((1 << (width)) - 1) #define streq(a, b) (strcmp((a), (b)) == 0) #define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c index 5ebddc528..dc21ca460 100644 --- a/drivers/clk/spear/clk-vco-pll.c +++ b/drivers/clk/spear/clk-vco-pll.c @@ -87,7 +87,7 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate, struct clk_pll *pll = to_clk_pll(hw); unsigned long prev_rate, vco_prev_rate, rate = 0; unsigned long vco_parent_rate = - __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk))); + clk_hw_get_rate(clk_hw_get_parent(clk_hw_get_parent(hw))); if (!prate) { pr_err("%s: prate is must for pll clk\n", __func__); diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c index 222ce108b..009bd1410 100644 --- a/drivers/clk/spear/spear1310_clock.c +++ b/drivers/clk/spear/spear1310_clock.c @@ -11,7 +11,6 @@ * warranty of any kind, whether express or implied. */ -#include #include #include #include diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c index 973c9d3fb..9c7abfd95 100644 --- a/drivers/clk/spear/spear1340_clock.c +++ b/drivers/clk/spear/spear1340_clock.c @@ -11,7 +11,6 @@ * warranty of any kind, whether express or implied. */ -#include #include #include #include diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index 231061fa7..e24f85cd4 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c @@ -9,7 +9,6 @@ * warranty of any kind, whether express or implied. */ -#include #include #include #include diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 8dd8cce27..bd355ee33 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -5,6 +5,7 @@ * Author: Maxime Coquelin for ST-Microelectronics. 
* License terms: GNU General Public License (GPL), version 2 */ +#include #include #include #include @@ -44,7 +45,7 @@ static int flexgen_enable(struct clk_hw *hw) clk_gate_ops.enable(fgate_hw); - pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk)); + pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw)); return 0; } @@ -58,7 +59,7 @@ static void flexgen_disable(struct clk_hw *hw) clk_gate_ops.disable(fgate_hw); - pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk)); + pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw)); } static int flexgen_is_enabled(struct clk_hw *hw) @@ -108,7 +109,7 @@ static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate, /* Round div according to exact prate and wished rate */ div = clk_best_div(*prate, rate); - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { *prate = rate * div; return rate; } @@ -243,7 +244,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np, int *num_parents) { const char **parents; - int nparents, i; + int nparents; nparents = of_clk_get_parent_count(np); if (WARN_ON(nparents <= 0)) @@ -253,10 +254,8 @@ static const char ** __init flexgen_get_parents(struct device_node *np, if (!parents) return NULL; - for (i = 0; i < nparents; i++) - parents[i] = of_clk_get_parent_name(np, i); + *num_parents = of_clk_parent_fill(np, parents, nparents); - *num_parents = nparents; return parents; } diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index d9eb2e1d8..576cd0354 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -15,6 +15,7 @@ #include #include +#include #include #include "clkgen.h" @@ -306,7 +307,7 @@ static const struct clkgen_quadfs_data st_fs660c32_F_416 = { .get_rate = clk_fs660c32_dig_get_rate, }; -static const struct clkgen_quadfs_data st_fs660c32_C_407 = { +static const struct clkgen_quadfs_data st_fs660c32_C = { .nrst_present = true, .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), CLKGEN_FIELD(0x2f0, 0x1, 1), @@ -349,7 +350,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = { .get_rate = clk_fs660c32_dig_get_rate, }; -static const struct clkgen_quadfs_data st_fs660c32_D_407 = { +static const struct clkgen_quadfs_data st_fs660c32_D = { .nrst_present = true, .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), CLKGEN_FIELD(0x2a0, 0x1, 1), @@ -512,7 +513,7 @@ static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw, params.ndiv = CLKGEN_READ(pll, ndiv); if (clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &rate)) pr_err("%s:%s error calculating rate\n", - __clk_get_name(hw->clk), __func__); + clk_hw_get_name(hw), __func__); pll->ndiv = params.ndiv; @@ -557,7 +558,7 @@ static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate clk_fs660c32_vco_get_rate(*prate, ¶ms, &rate); pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", - __func__, __clk_get_name(hw->clk), + __func__, clk_hw_get_name(hw), rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, (unsigned int)params.pe, (unsigned int)params.nsdiv); @@ -580,7 +581,7 @@ static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate, clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &hwrate); pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n", - __func__, __clk_get_name(hw->clk), + __func__, clk_hw_get_name(hw), hwrate, (unsigned int)params.ndiv); if (!hwrate) @@ -744,7 +745,7 @@ static int quadfs_fsynth_enable(struct clk_hw *hw) struct st_clk_quadfs_fsynth 
*fs = to_quadfs_fsynth(hw); unsigned long flags = 0; - pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw)); quadfs_fsynth_program_rate(fs); @@ -769,7 +770,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw) struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); unsigned long flags = 0; - pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw)); if (fs->lock) spin_lock_irqsave(fs->lock, flags); @@ -786,7 +787,7 @@ static int quadfs_fsynth_is_enabled(struct clk_hw *hw) u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]); pr_debug("%s: %s enable bit = 0x%x\n", - __func__, __clk_get_name(hw->clk), nsb); + __func__, clk_hw_get_name(hw), nsb); return fs->data->standby_polarity ? !nsb : !!nsb; } @@ -945,10 +946,10 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw, if (clk_fs_get_rate(parent_rate, ¶ms, &rate)) { pr_err("%s:%s error calculating rate\n", - __clk_get_name(hw->clk), __func__); + clk_hw_get_name(hw), __func__); } - pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate); return rate; } @@ -961,7 +962,7 @@ static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate, rate = quadfs_find_best_rate(hw, rate, *prate, ¶ms); pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", - __func__, __clk_get_name(hw->clk), + __func__, clk_hw_get_name(hw), rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, (unsigned int)params.pe, (unsigned int)params.nsdiv); @@ -1076,11 +1077,11 @@ static const struct of_device_id quadfs_of_match[] = { }, { .compatible = "st,stih407-quadfs660-C", - .data = &st_fs660c32_C_407 + .data = &st_fs660c32_C }, { .compatible = "st,stih407-quadfs660-D", - .data = &st_fs660c32_D_407 + .data = &st_fs660c32_D }, {} }; diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c index 717c4a91a..4f7f6c00b 100644 --- a/drivers/clk/st/clkgen-mux.c +++ b/drivers/clk/st/clkgen-mux.c @@ -15,6 +15,7 @@ #include #include +#include #include static DEFINE_SPINLOCK(clkgena_divmux_lock); @@ -24,20 +25,17 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np, int *num_parents) { const char **parents; - int nparents, i; + int nparents; nparents = of_clk_get_parent_count(np); if (WARN_ON(nparents <= 0)) return ERR_PTR(-EINVAL); - parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL); + parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL); if (!parents) return ERR_PTR(-ENOMEM); - for (i = 0; i < nparents; i++) - parents[i] = of_clk_get_parent_name(np, i); - - *num_parents = nparents; + *num_parents = of_clk_parent_fill(np, parents, nparents); return parents; } @@ -141,7 +139,7 @@ static u8 clkgena_divmux_get_parent(struct clk_hw *hw) genamux->muxsel = clk_mux_ops.get_parent(mux_hw); if ((s8)genamux->muxsel < 0) { pr_debug("%s: %s: Invalid parent, setting to default.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); genamux->muxsel = 0; } @@ -215,7 +213,7 @@ static const struct clk_ops clkgena_divmux_ops = { /** * clk_register_genamux - register a genamux clock with the clock framework */ -static struct clk *clk_register_genamux(const char *name, +static struct clk * __init clk_register_genamux(const char *name, const char **parent_names, u8 num_parents, void __iomem *reg, const struct clkgena_divmux_data *muxdata, @@ -369,11 +367,10 @@ static const struct of_device_id 
clkgena_divmux_of_match[] = { {} }; -static void __iomem * __init clkgen_get_register_base( - struct device_node *np) +static void __iomem * __init clkgen_get_register_base(struct device_node *np) { struct device_node *pnode; - void __iomem *reg = NULL; + void __iomem *reg; pnode = of_get_parent(np); if (!pnode) @@ -398,7 +395,7 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np) if (WARN_ON(!match)) return; - data = (struct clkgena_divmux_data *)match->data; + data = match->data; reg = clkgen_get_register_base(np); if (!reg) @@ -406,18 +403,18 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np) parents = clkgen_mux_get_parents(np, &num_parents); if (IS_ERR(parents)) - return; + goto err_parents; clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (!clk_data) - goto err; + goto err_alloc; clk_data->clk_num = data->num_outputs; - clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *), GFP_KERNEL); if (!clk_data->clks) - goto err; + goto err_alloc_clks; for (i = 0; i < clk_data->clk_num; i++) { struct clk *clk; @@ -447,11 +444,13 @@ static void __init st_of_clkgena_divmux_setup(struct device_node *np) of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); return; err: - if (clk_data) - kfree(clk_data->clks); - + kfree(clk_data->clks); +err_alloc_clks: kfree(clk_data); +err_alloc: kfree(parents); +err_parents: + iounmap(reg); } CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup); @@ -491,7 +490,7 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np) void __iomem *reg; const char *parent_name, *clk_name; struct clk *clk; - struct clkgena_prediv_data *data; + const struct clkgena_prediv_data *data; match = of_match_node(clkgena_prediv_of_match, np); if (!match) { @@ -499,7 +498,7 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np) return; } - data = (struct clkgena_prediv_data *)match->data; + data = match->data; reg = clkgen_get_register_base(np); if (!reg) @@ -507,18 +506,18 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np) parent_name = of_clk_get_parent_name(np, 0); if (!parent_name) - return; + goto err; if (of_property_read_string_index(np, "clock-output-names", 0, &clk_name)) - return; + goto err; clk = clk_register_divider_table(NULL, clk_name, parent_name, CLK_GET_RATE_NOCACHE, reg + data->offset, data->shift, 1, 0, data->table, NULL); if (IS_ERR(clk)) - return; + goto err; of_clk_add_provider(np, of_clk_src_simple_get, clk); pr_debug("%s: parent %s rate %u\n", @@ -527,6 +526,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np) (unsigned int)clk_get_rate(clk)); return; +err: + iounmap(reg); } CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup); @@ -630,7 +631,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np) void __iomem *reg; const char **parents; int num_parents; - struct clkgen_mux_data *data; + const struct clkgen_mux_data *data; match = of_match_node(mux_of_match, np); if (!match) { @@ -638,7 +639,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np) return; } - data = (struct clkgen_mux_data *)match->data; + data = match->data; reg = of_iomap(np, 0); if (!reg) { @@ -650,7 +651,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np) if (IS_ERR(parents)) { pr_err("%s: Failed to get parents (%ld)\n", __func__, PTR_ERR(parents)); - return; + goto err_parents; } clk = 
clk_register_mux(NULL, np->name, parents, num_parents, @@ -666,12 +667,14 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np) __clk_get_name(clk_get_parent(clk)), (unsigned int)clk_get_rate(clk)); + kfree(parents); of_clk_add_provider(np, of_clk_src_simple_get, clk); + return; err: kfree(parents); - - return; +err_parents: + iounmap(reg); } CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup); @@ -707,12 +710,12 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np) const char **parents; int num_parents, i; struct clk_onecell_data *clk_data; - struct clkgen_vcc_data *data; + const struct clkgen_vcc_data *data; match = of_match_node(vcc_of_match, np); if (WARN_ON(!match)) return; - data = (struct clkgen_vcc_data *)match->data; + data = match->data; reg = of_iomap(np, 0); if (!reg) @@ -720,18 +723,18 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np) parents = clkgen_mux_get_parents(np, &num_parents); if (IS_ERR(parents)) - return; + goto err_parents; clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (!clk_data) - goto err; + goto err_alloc; clk_data->clk_num = VCC_MAX_CHANNELS; - clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *), GFP_KERNEL); if (!clk_data->clks) - goto err; + goto err_alloc_clks; for (i = 0; i < clk_data->clk_num; i++) { struct clk *clk; @@ -750,21 +753,21 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np) if (*clk_name == '\0') continue; - gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); + gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) - break; + goto err; - div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); + div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) { kfree(gate); - break; + goto err; } - mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); + mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) { kfree(gate); kfree(div); - break; + goto err; } gate->reg = reg + VCC_GATE_OFFSET; @@ -823,10 +826,12 @@ err: kfree(container_of(composite->mux_hw, struct clk_mux, hw)); } - if (clk_data) - kfree(clk_data->clks); - + kfree(clk_data->clks); +err_alloc_clks: kfree(clk_data); +err_alloc: kfree(parents); +err_parents: + iounmap(reg); } CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 72d1c27ea..b2a332cf8 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -16,6 +16,7 @@ #include #include +#include #include #include "clkgen.h" @@ -192,7 +193,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = { .ops = &stm_pll3200c32_ops, }; -static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { +static const struct clkgen_pll_data st_pll3200c32_cx_0 = { /* 407 C0 PLL0 */ .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), @@ -204,7 +205,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { .ops = &stm_pll3200c32_ops, }; -static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = { +static const struct clkgen_pll_data st_pll3200c32_cx_1 = { /* 407 C0 PLL1 */ .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), @@ -291,7 +292,7 @@ static unsigned long recalc_stm_pll800c65(struct clk_hw *hw, res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv; rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv)); - pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + 
pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate); return rate; @@ -316,7 +317,7 @@ static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, /* Note: input is divided by 1000 to avoid overflow */ rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000; - pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate); return rate; } @@ -338,7 +339,7 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, /* Note: input is divided to avoid overflow */ rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000; - pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate); return rate; } @@ -365,7 +366,7 @@ static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, /* Note: input is divided by 1000 to avoid overflow */ rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000; - pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate); return rate; } @@ -623,12 +624,12 @@ static const struct of_device_id c32_pll_of_match[] = { .data = &st_pll3200c32_407_a0, }, { - .compatible = "st,stih407-plls-c32-c0_0", - .data = &st_pll3200c32_407_c0_0, + .compatible = "st,plls-c32-cx_0", + .data = &st_pll3200c32_cx_0, }, { - .compatible = "st,stih407-plls-c32-c0_1", - .data = &st_pll3200c32_407_c0_1, + .compatible = "st,plls-c32-cx_1", + .data = &st_pll3200c32_cx_1, }, { .compatible = "st,stih407-plls-c32-a9", diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile index 058f273d6..f5a35b82c 100644 --- a/drivers/clk/sunxi/Makefile +++ b/drivers/clk/sunxi/Makefile @@ -6,6 +6,7 @@ obj-y += clk-sunxi.o clk-factors.o obj-y += clk-a10-hosc.o obj-y += clk-a20-gmac.o obj-y += clk-mod0.o +obj-y += clk-simple-gates.o obj-y += clk-sun8i-mbus.o obj-y += clk-sun9i-core.o obj-y += clk-sun9i-mmc.o diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c index 0dcf4f205..1611b0364 100644 --- a/drivers/clk/sunxi/clk-a20-gmac.c +++ b/drivers/clk/sunxi/clk-a20-gmac.c @@ -80,9 +80,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node) goto free_mux; /* gmac clock requires exactly 2 parents */ - parents[0] = of_clk_get_parent_name(node, 0); - parents[1] = of_clk_get_parent_name(node, 1); - if (!parents[0] || !parents[1]) + if (of_clk_parent_fill(node, parents, 2) != 2) goto free_gate; reg = of_iomap(node, 0); diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c index 8c20190a3..59428dbd6 100644 --- a/drivers/clk/sunxi/clk-factors.c +++ b/drivers/clk/sunxi/clk-factors.c @@ -79,41 +79,42 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate, return rate; } -static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +static int clk_factors_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - struct clk *clk = hw->clk, *parent, *best_parent = NULL; + struct clk_hw *parent, *best_parent = NULL; int i, num_parents; unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0; /* find the parent that can help provide the fastest rate <= rate */ - num_parents = __clk_get_num_parents(clk); + num_parents = clk_hw_get_num_parents(hw); for (i = 0; i < num_parents; i++) { - parent = clk_get_parent_by_index(clk, 
i); + parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; - if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT) - parent_rate = __clk_round_rate(parent, rate); + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) + parent_rate = clk_hw_round_rate(parent, req->rate); else - parent_rate = __clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); - child_rate = clk_factors_round_rate(hw, rate, &parent_rate); + child_rate = clk_factors_round_rate(hw, req->rate, + &parent_rate); - if (child_rate <= rate && child_rate > best_child_rate) { + if (child_rate <= req->rate && child_rate > best_child_rate) { best_parent = parent; best = parent_rate; best_child_rate = child_rate; } } - if (best_parent) - *best_parent_p = __clk_get_hw(best_parent); - *best_parent_rate = best; + if (!best_parent) + return -EINVAL; - return best_child_rate; + req->best_parent_hw = best_parent; + req->best_parent_rate = best; + req->rate = best_child_rate; + + return 0; } static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate, @@ -174,9 +175,7 @@ struct clk *sunxi_factors_register(struct device_node *node, int i = 0; /* if we have a mux, we will have >1 parents */ - while (i < FACTORS_MAX_PARENTS && - (parents[i] = of_clk_get_parent_name(node, i)) != NULL) - i++; + i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS); /* * some factor clocks, such as pll5 and pll6, may have multiple diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index 9d028aec5..d167e1efb 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -14,10 +14,11 @@ * GNU General Public License for more details. */ +#include #include -#include #include #include +#include #include "clk-factors.h" diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c new file mode 100644 index 000000000..6ce91180d --- /dev/null +++ b/drivers/clk/sunxi/clk-simple-gates.c @@ -0,0 +1,158 @@ +/* + * Copyright 2015 Maxime Ripard + * + * Maxime Ripard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
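An illustrative sketch (not part of this patch) of the determine_rate shape that the clk_factors_determine_rate() hunk above converts to: the callback now fills in a struct clk_rate_request and returns 0 or a negative errno, instead of returning the chosen rate and writing through out-parameters. A hypothetical clock that always runs at half its first parent's rate:

	static int example_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, 0);

		if (!parent)
			return -EINVAL;

		/* Report both the chosen parent and the deliverable rate. */
		req->best_parent_hw = parent;
		req->best_parent_rate = clk_hw_get_rate(parent);
		req->rate = req->best_parent_rate / 2;

		return 0;
	}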
+ */ + +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(gates_lock); + +static void __init sunxi_simple_gates_setup(struct device_node *node, + const int protected[], + int nprotected) +{ + struct clk_onecell_data *clk_data; + const char *clk_parent, *clk_name; + struct property *prop; + struct resource res; + void __iomem *clk_reg; + void __iomem *reg; + const __be32 *p; + int number, i = 0, j; + u8 clk_bit; + u32 index; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) + return; + + clk_parent = of_clk_get_parent_name(node, 0); + + clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); + if (!clk_data) + goto err_unmap; + + number = of_property_count_u32_elems(node, "clock-indices"); + of_property_read_u32_index(node, "clock-indices", number - 1, &number); + + clk_data->clks = kcalloc(number + 1, sizeof(struct clk *), GFP_KERNEL); + if (!clk_data->clks) + goto err_free_data; + + of_property_for_each_u32(node, "clock-indices", prop, p, index) { + of_property_read_string_index(node, "clock-output-names", + i, &clk_name); + + clk_reg = reg + 4 * (index / 32); + clk_bit = index % 32; + + clk_data->clks[index] = clk_register_gate(NULL, clk_name, + clk_parent, 0, + clk_reg, + clk_bit, + 0, &gates_lock); + i++; + + if (IS_ERR(clk_data->clks[index])) { + WARN_ON(true); + continue; + } + + for (j = 0; j < nprotected; j++) + if (protected[j] == index) + clk_prepare_enable(clk_data->clks[index]); + + } + + clk_data->clk_num = number + 1; + of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + return; + +err_free_data: + kfree(clk_data); +err_unmap: + iounmap(reg); + of_address_to_resource(node, 0, &res); + release_mem_region(res.start, resource_size(&res)); +} + +static void __init sunxi_simple_gates_init(struct device_node *node) +{ + sunxi_simple_gates_setup(node, NULL, 0); +} + +CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun4i_a10_axi, "allwinner,sun4i-a10-axi-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun5i_a10s_apb0, "allwinner,sun5i-a10s-apb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun5i_a10s_apb1, "allwinner,sun5i-a10s-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun5i_a13_apb0, "allwinner,sun5i-a13-apb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun5i_a13_apb1, "allwinner,sun5i-a13-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun6i_a31_apb1, "allwinner,sun6i-a31-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun6i_a31_apb2, "allwinner,sun6i-a31-apb2-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun7i_a20_apb0, "allwinner,sun7i-a20-apb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun7i_a20_apb1, "allwinner,sun7i-a20-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun8i_a23_ahb1, "allwinner,sun8i-a23-ahb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun8i_a23_apb1, "allwinner,sun8i-a23-apb1-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk", + 
sunxi_simple_gates_init); +CLK_OF_DECLARE(sun9i_a80_ahb2, "allwinner,sun9i-a80-ahb2-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-gates-clk", + sunxi_simple_gates_init); +CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-gates-clk", + sunxi_simple_gates_init); + +static const int sun4i_a10_ahb_critical_clocks[] __initconst = { + 14, /* ahb_sdram */ +}; + +static void __init sun4i_a10_ahb_init(struct device_node *node) +{ + sunxi_simple_gates_setup(node, sun4i_a10_ahb_critical_clocks, + ARRAY_SIZE(sun4i_a10_ahb_critical_clocks)); +} +CLK_OF_DECLARE(sun4i_a10_ahb, "allwinner,sun4i-a10-ahb-gates-clk", + sun4i_a10_ahb_init); +CLK_OF_DECLARE(sun5i_a10s_ahb, "allwinner,sun5i-a10s-ahb-gates-clk", + sun4i_a10_ahb_init); +CLK_OF_DECLARE(sun5i_a13_ahb, "allwinner,sun5i-a13-ahb-gates-clk", + sun4i_a10_ahb_init); +CLK_OF_DECLARE(sun7i_a20_ahb, "allwinner,sun7i-a20-ahb-gates-clk", + sun4i_a10_ahb_init); diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c index 63cf14919..806fd019c 100644 --- a/drivers/clk/sunxi/clk-sun6i-ar100.c +++ b/drivers/clk/sunxi/clk-sun6i-ar100.c @@ -44,28 +44,25 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw, return (parent_rate >> shift) / (div + 1); } -static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int ar100_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - int nparents = __clk_get_num_parents(hw->clk); + int nparents = clk_hw_get_num_parents(hw); long best_rate = -EINVAL; int i; - *best_parent_clk = NULL; + req->best_parent_hw = NULL; for (i = 0; i < nparents; i++) { unsigned long parent_rate; unsigned long tmp_rate; - struct clk *parent; + struct clk_hw *parent; unsigned long div; int shift; - parent = clk_get_parent_by_index(hw->clk, i); - parent_rate = __clk_get_rate(parent); - div = DIV_ROUND_UP(parent_rate, rate); + parent = clk_hw_get_parent_by_index(hw, i); + parent_rate = clk_hw_get_rate(parent); + div = DIV_ROUND_UP(parent_rate, req->rate); /* * The AR100 clk contains 2 divisors: @@ -101,14 +98,19 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, continue; tmp_rate = (parent_rate >> shift) / div; - if (!*best_parent_clk || tmp_rate > best_rate) { - *best_parent_clk = __clk_get_hw(parent); - *best_parent_rate = parent_rate; + if (!req->best_parent_hw || tmp_rate > best_rate) { + req->best_parent_hw = parent; + req->best_parent_rate = parent_rate; best_rate = tmp_rate; } } - return best_rate; + if (best_rate < 0) + return best_rate; + + req->rate = best_rate; + + return 0; } static int ar100_set_parent(struct clk_hw *hw, u8 index) @@ -180,7 +182,6 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev) struct resource *r; struct clk *clk; int nparents; - int i; ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL); if (!ar100) @@ -195,8 +196,7 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev) if (nparents > SUN6I_AR100_MAX_PARENTS) nparents = SUN6I_AR100_MAX_PARENTS; - for (i = 0; i < nparents; i++) - parents[i] = of_clk_get_parent_name(np, i); + of_clk_parent_fill(np, parents, nparents); of_property_read_string(np, "clock-output-names", &clk_name); diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c index 14cd02606..bf117a636 100644 --- a/drivers/clk/sunxi/clk-sun8i-mbus.c +++ 
b/drivers/clk/sunxi/clk-sun8i-mbus.c @@ -14,8 +14,8 @@ * GNU General Public License for more details. */ +#include #include -#include #include #include "clk-factors.h" diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c index 887f4ea16..6c4c98324 100644 --- a/drivers/clk/sunxi/clk-sun9i-core.c +++ b/drivers/clk/sunxi/clk-sun9i-core.c @@ -14,8 +14,8 @@ * GNU General Public License for more details. */ +#include #include -#include #include #include #include diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 710c27364..3436a948b 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -14,14 +14,15 @@ * GNU General Public License for more details. */ +#include #include -#include #include #include #include #include #include #include +#include #include #define SUN9I_MMC_WIDTH 4 diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index abf7b37fa..413070d07 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -14,11 +14,13 @@ * GNU General Public License for more details. */ +#include #include #include #include #include #include +#include #include #include @@ -118,42 +120,42 @@ static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp, return (parent_rate / calcm) >> calcp; } -static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - struct clk *clk = hw->clk, *parent, *best_parent = NULL; + struct clk_hw *parent, *best_parent = NULL; int i, num_parents; unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0; /* find the parent that can help provide the fastest rate <= rate */ - num_parents = __clk_get_num_parents(clk); + num_parents = clk_hw_get_num_parents(hw); for (i = 0; i < num_parents; i++) { - parent = clk_get_parent_by_index(clk, i); + parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; - if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT) - parent_rate = __clk_round_rate(parent, rate); + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) + parent_rate = clk_hw_round_rate(parent, req->rate); else - parent_rate = __clk_get_rate(parent); + parent_rate = clk_hw_get_rate(parent); - child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i, + child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i, parent_rate); - if (child_rate <= rate && child_rate > best_child_rate) { + if (child_rate <= req->rate && child_rate > best_child_rate) { best_parent = parent; best = parent_rate; best_child_rate = child_rate; } } - if (best_parent) - *best_parent_clk = __clk_get_hw(best_parent); - *best_parent_rate = best; + if (!best_parent) + return -EINVAL; - return best_child_rate; + req->best_parent_hw = best_parent; + req->best_parent_rate = best; + req->rate = best_child_rate; + + return 0; } static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate, @@ -195,17 +197,14 @@ static void __init sun6i_ahb1_clk_setup(struct device_node *node) const char *clk_name = node->name; const char *parents[SUN6I_AHB1_MAX_PARENTS]; void __iomem *reg; - int i = 0; + int i; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) return; /* we have a mux, we will have >1 parents */ - while (i < SUN6I_AHB1_MAX_PARENTS && - (parents[i] = 
of_clk_get_parent_name(node, i)) != NULL) - i++; - + i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS); of_property_read_string(node, "clock-output-names", &clk_name); ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL); @@ -786,14 +785,11 @@ static void __init sunxi_mux_clk_setup(struct device_node *node, const char *clk_name = node->name; const char *parents[SUNXI_MAX_PARENTS]; void __iomem *reg; - int i = 0; + int i; reg = of_iomap(node, 0); - while (i < SUNXI_MAX_PARENTS && - (parents[i] = of_clk_get_parent_name(node, i)) != NULL) - i++; - + i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS); of_property_read_string(node, "clock-output-names", &clk_name); clk = clk_register_mux(NULL, clk_name, parents, i, @@ -900,150 +896,6 @@ struct gates_data { DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE); }; -static const struct gates_data sun4i_axi_gates_data __initconst = { - .mask = {1}, -}; - -static const struct gates_data sun4i_ahb_gates_data __initconst = { - .mask = {0x7F77FFF, 0x14FB3F}, -}; - -static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = { - .mask = {0x147667e7, 0x185915}, -}; - -static const struct gates_data sun5i_a13_ahb_gates_data __initconst = { - .mask = {0x107067e7, 0x185111}, -}; - -static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = { - .mask = {0xEDFE7F62, 0x794F931}, -}; - -static const struct gates_data sun7i_a20_ahb_gates_data __initconst = { - .mask = { 0x12f77fff, 0x16ff3f }, -}; - -static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = { - .mask = {0x25386742, 0x2505111}, -}; - -static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = { - .mask = {0xF5F12B}, -}; - -static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = { - .mask = {0x1E20003}, -}; - -static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = { - .mask = {0x9B7}, -}; - -static const struct gates_data sun4i_apb0_gates_data __initconst = { - .mask = {0x4EF}, -}; - -static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = { - .mask = {0x469}, -}; - -static const struct gates_data sun5i_a13_apb0_gates_data __initconst = { - .mask = {0x61}, -}; - -static const struct gates_data sun7i_a20_apb0_gates_data __initconst = { - .mask = { 0x4ff }, -}; - -static const struct gates_data sun9i_a80_apb0_gates_data __initconst = { - .mask = {0xEB822}, -}; - -static const struct gates_data sun4i_apb1_gates_data __initconst = { - .mask = {0xFF00F7}, -}; - -static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = { - .mask = {0xf0007}, -}; - -static const struct gates_data sun5i_a13_apb1_gates_data __initconst = { - .mask = {0xa0007}, -}; - -static const struct gates_data sun6i_a31_apb1_gates_data __initconst = { - .mask = {0x3031}, -}; - -static const struct gates_data sun8i_a23_apb1_gates_data __initconst = { - .mask = {0x3021}, -}; - -static const struct gates_data sun6i_a31_apb2_gates_data __initconst = { - .mask = {0x3F000F}, -}; - -static const struct gates_data sun7i_a20_apb1_gates_data __initconst = { - .mask = { 0xff80ff }, -}; - -static const struct gates_data sun9i_a80_apb1_gates_data __initconst = { - .mask = {0x3F001F}, -}; - -static const struct gates_data sun8i_a23_apb2_gates_data __initconst = { - .mask = {0x1F0007}, -}; - -static void __init sunxi_gates_clk_setup(struct device_node *node, - struct gates_data *data) -{ - struct clk_onecell_data *clk_data; - const char *clk_parent; - const char *clk_name; - void __iomem *reg; - int qty; - int i = 0; - int j = 0; 
- - reg = of_iomap(node, 0); - - clk_parent = of_clk_get_parent_name(node, 0); - - /* Worst-case size approximation and memory allocation */ - qty = find_last_bit(data->mask, SUNXI_GATES_MAX_SIZE); - clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); - if (!clk_data) - return; - clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL); - if (!clk_data->clks) { - kfree(clk_data); - return; - } - - for_each_set_bit(i, data->mask, SUNXI_GATES_MAX_SIZE) { - of_property_read_string_index(node, "clock-output-names", - j, &clk_name); - - clk_data->clks[i] = clk_register_gate(NULL, clk_name, - clk_parent, 0, - reg + 4 * (i/32), i % 32, - 0, &clk_lock); - WARN_ON(IS_ERR(clk_data->clks[i])); - clk_register_clkdev(clk_data->clks[i], clk_name, NULL); - - j++; - } - - /* Adjust to the real max */ - clk_data->clk_num = i; - - of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); -} - - - /** * sunxi_divs_clk_setup() helper data */ @@ -1281,34 +1133,6 @@ static const struct of_device_id clk_mux_match[] __initconst = { {} }; -/* Matches for gate clocks */ -static const struct of_device_id clk_gates_match[] __initconst = { - {.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,}, - {.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,}, - {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,}, - {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,}, - {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,}, - {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,}, - {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,}, - {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,}, - {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,}, - {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,}, - {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, - {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,}, - {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,}, - {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,}, - {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,}, - {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, - {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,}, - {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,}, - {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,}, - {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,}, - {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,}, - {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,}, - {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, - {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,}, - {} -}; static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match, void *function) @@ -1340,9 +1164,6 @@ static void __init sunxi_init_clocks(const char 
*clocks[], int nclocks) /* Register mux clocks */ of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup); - /* Register gate clocks */ - of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup); - /* Protect the clocks that needs to stay on */ for (i = 0; i < nclocks; i++) { struct clk *clk = clk_get(NULL, clocks[i]); @@ -1354,7 +1175,6 @@ static void __init sunxi_init_clocks(const char *clocks[], int nclocks) static const char *sun4i_a10_critical_clocks[] __initdata = { "pll5_ddr", - "ahb_sdram", }; static void __init sun4i_a10_init_clocks(struct device_node *node) @@ -1367,7 +1187,6 @@ CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks) static const char *sun5i_critical_clocks[] __initdata = { "cpu", "pll5_ddr", - "ahb_sdram", }; static void __init sun5i_init_clocks(struct device_node *node) diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c index 3a25f9588..1a72cd672 100644 --- a/drivers/clk/sunxi/clk-usb.c +++ b/drivers/clk/sunxi/clk-usb.c @@ -14,11 +14,12 @@ * GNU General Public License for more details. */ +#include #include -#include #include #include #include +#include #include diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index aec862ba7..826c325dc 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -1,5 +1,6 @@ obj-y += clk.o obj-y += clk-audio-sync.o +obj-y += clk-dfll.o obj-y += clk-divider.o obj-y += clk-periph.o obj-y += clk-periph-gate.o @@ -16,4 +17,6 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o +obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o +obj-y += cvb.o diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c new file mode 100644 index 000000000..c4e3a52e2 --- /dev/null +++ b/drivers/clk/tegra/clk-dfll.c @@ -0,0 +1,1763 @@ +/* + * clk-dfll.c - Tegra DFLL clock source common code + * + * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * + * Aleksandr Frid + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * This library is for the DVCO and DFLL IP blocks on the Tegra124 + * SoC. These IP blocks together are also known at NVIDIA as + * "CL-DVFS". To try to avoid confusion, this code refers to them + * collectively as the "DFLL." + * + * The DFLL is a root clocksource which tolerates some amount of + * supply voltage noise. Tegra124 uses it to clock the fast CPU + * complex when the target CPU speed is above a particular rate. The + * DFLL can be operated in either open-loop mode or closed-loop mode. + * In open-loop mode, the DFLL generates an output clock appropriate + * to the supply voltage. In closed-loop mode, when configured with a + * target frequency, the DFLL minimizes supply voltage while + * delivering an average frequency equal to the target. + * + * Devices clocked by the DFLL must be able to tolerate frequency + * variation. 
In the case of the CPU, it's important to note that the + * CPU cycle time will vary. This has implications for + * performance-measurement code and any code that relies on the CPU + * cycle time to delay for a certain length of time. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-dfll.h" + +/* + * DFLL control registers - access via dfll_{readl,writel} + */ + +/* DFLL_CTRL: DFLL control register */ +#define DFLL_CTRL 0x00 +#define DFLL_CTRL_MODE_MASK 0x03 + +/* DFLL_CONFIG: DFLL sample rate control */ +#define DFLL_CONFIG 0x04 +#define DFLL_CONFIG_DIV_MASK 0xff +#define DFLL_CONFIG_DIV_PRESCALE 32 + +/* DFLL_PARAMS: tuning coefficients for closed loop integrator */ +#define DFLL_PARAMS 0x08 +#define DFLL_PARAMS_CG_SCALE (0x1 << 24) +#define DFLL_PARAMS_FORCE_MODE_SHIFT 22 +#define DFLL_PARAMS_FORCE_MODE_MASK (0x3 << DFLL_PARAMS_FORCE_MODE_SHIFT) +#define DFLL_PARAMS_CF_PARAM_SHIFT 16 +#define DFLL_PARAMS_CF_PARAM_MASK (0x3f << DFLL_PARAMS_CF_PARAM_SHIFT) +#define DFLL_PARAMS_CI_PARAM_SHIFT 8 +#define DFLL_PARAMS_CI_PARAM_MASK (0x7 << DFLL_PARAMS_CI_PARAM_SHIFT) +#define DFLL_PARAMS_CG_PARAM_SHIFT 0 +#define DFLL_PARAMS_CG_PARAM_MASK (0xff << DFLL_PARAMS_CG_PARAM_SHIFT) + +/* DFLL_TUNE0: delay line configuration register 0 */ +#define DFLL_TUNE0 0x0c + +/* DFLL_TUNE1: delay line configuration register 1 */ +#define DFLL_TUNE1 0x10 + +/* DFLL_FREQ_REQ: target DFLL frequency control */ +#define DFLL_FREQ_REQ 0x14 +#define DFLL_FREQ_REQ_FORCE_ENABLE (0x1 << 28) +#define DFLL_FREQ_REQ_FORCE_SHIFT 16 +#define DFLL_FREQ_REQ_FORCE_MASK (0xfff << DFLL_FREQ_REQ_FORCE_SHIFT) +#define FORCE_MAX 2047 +#define FORCE_MIN -2048 +#define DFLL_FREQ_REQ_SCALE_SHIFT 8 +#define DFLL_FREQ_REQ_SCALE_MASK (0xff << DFLL_FREQ_REQ_SCALE_SHIFT) +#define DFLL_FREQ_REQ_SCALE_MAX 256 +#define DFLL_FREQ_REQ_FREQ_VALID (0x1 << 7) +#define DFLL_FREQ_REQ_MULT_SHIFT 0 +#define DFLL_FREQ_REG_MULT_MASK (0x7f << DFLL_FREQ_REQ_MULT_SHIFT) +#define FREQ_MAX 127 + +/* DFLL_DROOP_CTRL: droop prevention control */ +#define DFLL_DROOP_CTRL 0x1c + +/* DFLL_OUTPUT_CFG: closed loop mode control registers */ +/* NOTE: access via dfll_i2c_{readl,writel} */ +#define DFLL_OUTPUT_CFG 0x20 +#define DFLL_OUTPUT_CFG_I2C_ENABLE (0x1 << 30) +#define OUT_MASK 0x3f +#define DFLL_OUTPUT_CFG_SAFE_SHIFT 24 +#define DFLL_OUTPUT_CFG_SAFE_MASK \ + (OUT_MASK << DFLL_OUTPUT_CFG_SAFE_SHIFT) +#define DFLL_OUTPUT_CFG_MAX_SHIFT 16 +#define DFLL_OUTPUT_CFG_MAX_MASK \ + (OUT_MASK << DFLL_OUTPUT_CFG_MAX_SHIFT) +#define DFLL_OUTPUT_CFG_MIN_SHIFT 8 +#define DFLL_OUTPUT_CFG_MIN_MASK \ + (OUT_MASK << DFLL_OUTPUT_CFG_MIN_SHIFT) +#define DFLL_OUTPUT_CFG_PWM_DELTA (0x1 << 7) +#define DFLL_OUTPUT_CFG_PWM_ENABLE (0x1 << 6) +#define DFLL_OUTPUT_CFG_PWM_DIV_SHIFT 0 +#define DFLL_OUTPUT_CFG_PWM_DIV_MASK \ + (OUT_MASK << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT) + +/* DFLL_OUTPUT_FORCE: closed loop mode voltage forcing control */ +#define DFLL_OUTPUT_FORCE 0x24 +#define DFLL_OUTPUT_FORCE_ENABLE (0x1 << 6) +#define DFLL_OUTPUT_FORCE_VALUE_SHIFT 0 +#define DFLL_OUTPUT_FORCE_VALUE_MASK \ + (OUT_MASK << DFLL_OUTPUT_FORCE_VALUE_SHIFT) + +/* DFLL_MONITOR_CTRL: internal monitor data source control */ +#define DFLL_MONITOR_CTRL 0x28 +#define DFLL_MONITOR_CTRL_FREQ 6 + +/* DFLL_MONITOR_DATA: internal monitor data output */ +#define DFLL_MONITOR_DATA 0x2c +#define DFLL_MONITOR_DATA_NEW_MASK (0x1 << 16) +#define DFLL_MONITOR_DATA_VAL_SHIFT 0 +#define 
DFLL_MONITOR_DATA_VAL_MASK (0xFFFF << DFLL_MONITOR_DATA_VAL_SHIFT) + +/* + * I2C output control registers - access via dfll_i2c_{readl,writel} + */ + +/* DFLL_I2C_CFG: I2C controller configuration register */ +#define DFLL_I2C_CFG 0x40 +#define DFLL_I2C_CFG_ARB_ENABLE (0x1 << 20) +#define DFLL_I2C_CFG_HS_CODE_SHIFT 16 +#define DFLL_I2C_CFG_HS_CODE_MASK (0x7 << DFLL_I2C_CFG_HS_CODE_SHIFT) +#define DFLL_I2C_CFG_PACKET_ENABLE (0x1 << 15) +#define DFLL_I2C_CFG_SIZE_SHIFT 12 +#define DFLL_I2C_CFG_SIZE_MASK (0x7 << DFLL_I2C_CFG_SIZE_SHIFT) +#define DFLL_I2C_CFG_SLAVE_ADDR_10 (0x1 << 10) +#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT 1 +#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT 0 + +/* DFLL_I2C_VDD_REG_ADDR: PMIC I2C address for closed loop mode */ +#define DFLL_I2C_VDD_REG_ADDR 0x44 + +/* DFLL_I2C_STS: I2C controller status */ +#define DFLL_I2C_STS 0x48 +#define DFLL_I2C_STS_I2C_LAST_SHIFT 1 +#define DFLL_I2C_STS_I2C_REQ_PENDING 0x1 + +/* DFLL_INTR_STS: DFLL interrupt status register */ +#define DFLL_INTR_STS 0x5c + +/* DFLL_INTR_EN: DFLL interrupt enable register */ +#define DFLL_INTR_EN 0x60 +#define DFLL_INTR_MIN_MASK 0x1 +#define DFLL_INTR_MAX_MASK 0x2 + +/* + * Integrated I2C controller registers - relative to td->i2c_controller_base + */ + +/* DFLL_I2C_CLK_DIVISOR: I2C controller clock divisor */ +#define DFLL_I2C_CLK_DIVISOR 0x6c +#define DFLL_I2C_CLK_DIVISOR_MASK 0xffff +#define DFLL_I2C_CLK_DIVISOR_FS_SHIFT 16 +#define DFLL_I2C_CLK_DIVISOR_HS_SHIFT 0 +#define DFLL_I2C_CLK_DIVISOR_PREDIV 8 +#define DFLL_I2C_CLK_DIVISOR_HSMODE_PREDIV 12 + +/* + * Other constants + */ + +/* MAX_DFLL_VOLTAGES: number of LUT entries in the DFLL IP block */ +#define MAX_DFLL_VOLTAGES 33 + +/* + * REF_CLK_CYC_PER_DVCO_SAMPLE: the number of ref_clk cycles that the hardware + * integrates the DVCO counter over - used for debug rate monitoring and + * droop control + */ +#define REF_CLK_CYC_PER_DVCO_SAMPLE 4 + +/* + * REF_CLOCK_RATE: the DFLL reference clock rate currently supported by this + * driver, in Hz + */ +#define REF_CLOCK_RATE 51000000UL + +#define DVCO_RATE_TO_MULT(rate, ref_rate) ((rate) / ((ref_rate) / 2)) +#define MULT_TO_DVCO_RATE(mult, ref_rate) ((mult) * ((ref_rate) / 2)) + +/** + * enum dfll_ctrl_mode - DFLL hardware operating mode + * @DFLL_UNINITIALIZED: (uninitialized state - not in hardware bitfield) + * @DFLL_DISABLED: DFLL not generating an output clock + * @DFLL_OPEN_LOOP: DVCO running, but DFLL not adjusting voltage + * @DFLL_CLOSED_LOOP: DVCO running, and DFLL adjusting voltage to match + * the requested rate + * + * The integer corresponding to the last three states, minus one, is + * written to the DFLL hardware to change operating modes. + */ +enum dfll_ctrl_mode { + DFLL_UNINITIALIZED = 0, + DFLL_DISABLED = 1, + DFLL_OPEN_LOOP = 2, + DFLL_CLOSED_LOOP = 3, +}; + +/** + * enum dfll_tune_range - voltage range that the driver believes it's in + * @DFLL_TUNE_UNINITIALIZED: DFLL tuning not yet programmed + * @DFLL_TUNE_LOW: DFLL in the low-voltage range (or open-loop mode) + * + * Some DFLL tuning parameters may need to change depending on the + * DVCO's voltage; these states represent the ranges that the driver + * supports. These are software states; these values are never + * written into registers.
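As a reading aid (not part of this patch), the legal transitions between the dfll_ctrl_mode states above, as implemented by dfll_enable()/dfll_disable() and dfll_lock()/dfll_unlock() later in this file, form a simple chain:

	/*
	 *   DISABLED  <-- enable()/disable() -->  OPEN_LOOP
	 *   OPEN_LOOP <--  lock()/unlock()   -->  CLOSED_LOOP
	 *
	 * Open-loop output simply follows the current supply voltage;
	 * closed-loop mode additionally drives the PMIC over I2C so the
	 * average output rate tracks the requested target.
	 */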
+ */ +enum dfll_tune_range { + DFLL_TUNE_UNINITIALIZED = 0, + DFLL_TUNE_LOW = 1, +}; + +/** + * struct dfll_rate_req - target DFLL rate request data + * @rate: target frequency, after the postscaling + * @dvco_target_rate: target frequency, before the postscaling + * @lut_index: LUT index of the voltage at which the dvco_target_rate will be reached + * @mult_bits: value to program to the MULT bits of the DFLL_FREQ_REQ register + * @scale_bits: value to program to the SCALE bits of the DFLL_FREQ_REQ register + */ +struct dfll_rate_req { + unsigned long rate; + unsigned long dvco_target_rate; + int lut_index; + u8 mult_bits; + u8 scale_bits; +}; + +struct tegra_dfll { + struct device *dev; + struct tegra_dfll_soc_data *soc; + + void __iomem *base; + void __iomem *i2c_base; + void __iomem *i2c_controller_base; + void __iomem *lut_base; + + struct regulator *vdd_reg; + struct clk *soc_clk; + struct clk *ref_clk; + struct clk *i2c_clk; + struct clk *dfll_clk; + struct reset_control *dvco_rst; + unsigned long ref_rate; + unsigned long i2c_clk_rate; + unsigned long dvco_rate_min; + + enum dfll_ctrl_mode mode; + enum dfll_tune_range tune_range; + struct dentry *debugfs_dir; + struct clk_hw dfll_clk_hw; + const char *output_clock_name; + struct dfll_rate_req last_req; + unsigned long last_unrounded_rate; + + /* Parameters from DT */ + u32 droop_ctrl; + u32 sample_rate; + u32 force_mode; + u32 cf; + u32 ci; + u32 cg; + bool cg_scale; + + /* I2C interface parameters */ + u32 i2c_fs_rate; + u32 i2c_reg; + u32 i2c_slave_addr; + + /* i2c_lut array entries are regulator framework selectors */ + unsigned i2c_lut[MAX_DFLL_VOLTAGES]; + int i2c_lut_size; + u8 lut_min, lut_max, lut_safe; +}; + +#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw) + +/* mode_name: map numeric DFLL modes to names for friendly console messages */ +static const char * const mode_name[] = { + [DFLL_UNINITIALIZED] = "uninitialized", + [DFLL_DISABLED] = "disabled", + [DFLL_OPEN_LOOP] = "open_loop", + [DFLL_CLOSED_LOOP] = "closed_loop", +}; + +/* + * Register accessors + */ + +static inline u32 dfll_readl(struct tegra_dfll *td, u32 offs) +{ + return __raw_readl(td->base + offs); +} + +static inline void dfll_writel(struct tegra_dfll *td, u32 val, u32 offs) +{ + WARN_ON(offs >= DFLL_I2C_CFG); + __raw_writel(val, td->base + offs); +} + +static inline void dfll_wmb(struct tegra_dfll *td) +{ + dfll_readl(td, DFLL_CTRL); +} + +/* I2C output control registers - for addresses above DFLL_I2C_CFG */ + +static inline u32 dfll_i2c_readl(struct tegra_dfll *td, u32 offs) +{ + return __raw_readl(td->i2c_base + offs); +} + +static inline void dfll_i2c_writel(struct tegra_dfll *td, u32 val, u32 offs) +{ + __raw_writel(val, td->i2c_base + offs); +} + +static inline void dfll_i2c_wmb(struct tegra_dfll *td) +{ + dfll_i2c_readl(td, DFLL_I2C_CFG); +} + +/** + * dfll_is_running - is the DFLL currently generating a clock? + * @td: DFLL instance + * + * If the DFLL is currently generating an output clock signal, return + * true; otherwise return false. + */ +static bool dfll_is_running(struct tegra_dfll *td) +{ + return td->mode >= DFLL_OPEN_LOOP; +} + +/* + * Runtime PM suspend/resume callbacks + */ + +/** + * tegra_dfll_runtime_resume - enable all clocks needed by the DFLL + * @dev: DFLL device * + * + * Enable all clocks needed by the DFLL. Assumes that clk_prepare() + * has already been called on all the clocks. + * + * XXX Should also handle context restore when returning from off.
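The resume path below only calls clk_enable(); a hedged sketch (not from this patch) of the probe-side counterpart implied by the "clk_prepare() has already been called" assumption:

	static int example_prepare_dfll_clocks(struct tegra_dfll *td)
	{
		int ret;

		/*
		 * clk_prepare() may sleep, so it is done once outside the
		 * runtime-PM fast path; clk_enable() is then atomic-safe.
		 */
		ret = clk_prepare(td->ref_clk);
		if (ret)
			return ret;

		ret = clk_prepare(td->soc_clk);
		if (ret)
			goto err_ref;

		ret = clk_prepare(td->i2c_clk);
		if (ret)
			goto err_soc;

		return 0;

	err_soc:
		clk_unprepare(td->soc_clk);
	err_ref:
		clk_unprepare(td->ref_clk);
		return ret;
	}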
+ */ +int tegra_dfll_runtime_resume(struct device *dev) +{ + struct tegra_dfll *td = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(td->ref_clk); + if (ret) { + dev_err(dev, "could not enable ref clock: %d\n", ret); + return ret; + } + + ret = clk_enable(td->soc_clk); + if (ret) { + dev_err(dev, "could not enable register clock: %d\n", ret); + clk_disable(td->ref_clk); + return ret; + } + + ret = clk_enable(td->i2c_clk); + if (ret) { + dev_err(dev, "could not enable i2c clock: %d\n", ret); + clk_disable(td->soc_clk); + clk_disable(td->ref_clk); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(tegra_dfll_runtime_resume); + +/** + * tegra_dfll_runtime_suspend - disable all clocks needed by the DFLL + * @dev: DFLL device * + * + * Disable all clocks needed by the DFLL. Assumes that other code + * will later call clk_unprepare(). + */ +int tegra_dfll_runtime_suspend(struct device *dev) +{ + struct tegra_dfll *td = dev_get_drvdata(dev); + + clk_disable(td->ref_clk); + clk_disable(td->soc_clk); + clk_disable(td->i2c_clk); + + return 0; +} +EXPORT_SYMBOL(tegra_dfll_runtime_suspend); + +/* + * DFLL tuning operations (per-voltage-range tuning settings) + */ + +/** + * dfll_tune_low - tune to DFLL and CPU settings valid for any voltage + * @td: DFLL instance + * + * Tune the DFLL oscillator parameters and the CPU clock shaper for + * the low-voltage range. These settings are valid for any voltage, + * but may not be optimal. + */ +static void dfll_tune_low(struct tegra_dfll *td) +{ + td->tune_range = DFLL_TUNE_LOW; + + dfll_writel(td, td->soc->tune0_low, DFLL_TUNE0); + dfll_writel(td, td->soc->tune1, DFLL_TUNE1); + dfll_wmb(td); + + if (td->soc->set_clock_trimmers_low) + td->soc->set_clock_trimmers_low(); +} + +/* + * Output clock scaler helpers + */ + +/** + * dfll_scale_dvco_rate - calculate scaled rate from the DVCO rate + * @scale_bits: clock scaler value (bits in the DFLL_FREQ_REQ_SCALE field) + * @dvco_rate: the DVCO rate + * + * Apply the same scaling formula that the DFLL hardware uses to scale + * the DVCO rate. + */ +static unsigned long dfll_scale_dvco_rate(int scale_bits, + unsigned long dvco_rate) +{ + return (u64)dvco_rate * (scale_bits + 1) / DFLL_FREQ_REQ_SCALE_MAX; +} + +/* + * Monitor control + */ + +/** + * dfll_calc_monitored_rate - convert DFLL_MONITOR_DATA_VAL rate into real freq + * @monitor_data: value read from the DFLL_MONITOR_DATA_VAL bitfield + * @ref_rate: DFLL reference clock rate + * + * Convert @monitor_data from DFLL_MONITOR_DATA_VAL units into cycles + * per second. Returns the converted value. + */ +static u64 dfll_calc_monitored_rate(u32 monitor_data, + unsigned long ref_rate) +{ + return monitor_data * (ref_rate / REF_CLK_CYC_PER_DVCO_SAMPLE); +} + +/** + * dfll_read_monitor_rate - return the DFLL's output rate from internal monitor + * @td: DFLL instance + * + * If the DFLL is enabled, return the last rate reported by the DFLL's + * internal monitoring hardware. This works in both open-loop and + * closed-loop mode, and takes the output scaler setting into account. + * Assumes that the monitor was programmed to monitor frequency before + * the sample period started. If the driver believes that the DFLL is + * currently uninitialized or disabled, it will return 0, since + * otherwise the DFLL monitor data register will return the last + * measured rate from when the DFLL was active. 
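A worked example (sample values assumed, not from this patch) of the monitor arithmetic used by dfll_read_monitor_rate() below, combining dfll_calc_monitored_rate() and dfll_scale_dvco_rate() from above:

	/*
	 * With the 51 MHz REF_CLOCK_RATE and REF_CLK_CYC_PER_DVCO_SAMPLE = 4,
	 * one monitor count is worth 51 MHz / 4 = 12.75 MHz:
	 *
	 *   dfll_calc_monitored_rate(56, 51000000) = 56 * 12750000 = 714 MHz
	 *
	 * If DFLL_FREQ_REQ holds scale bits of 127 (a 128/256 = 1/2
	 * postscale), the reported output rate is then 714 MHz / 2 = 357 MHz.
	 */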
+ */ +static u64 dfll_read_monitor_rate(struct tegra_dfll *td) +{ + u32 v, s; + u64 pre_scaler_rate, post_scaler_rate; + + if (!dfll_is_running(td)) + return 0; + + v = dfll_readl(td, DFLL_MONITOR_DATA); + v = (v & DFLL_MONITOR_DATA_VAL_MASK) >> DFLL_MONITOR_DATA_VAL_SHIFT; + pre_scaler_rate = dfll_calc_monitored_rate(v, td->ref_rate); + + s = dfll_readl(td, DFLL_FREQ_REQ); + s = (s & DFLL_FREQ_REQ_SCALE_MASK) >> DFLL_FREQ_REQ_SCALE_SHIFT; + post_scaler_rate = dfll_scale_dvco_rate(s, pre_scaler_rate); + + return post_scaler_rate; +} + +/* + * DFLL mode switching + */ + +/** + * dfll_set_mode - change the DFLL control mode + * @td: DFLL instance + * @mode: DFLL control mode (see enum dfll_ctrl_mode) + * + * Change the DFLL's operating mode between disabled, open-loop mode, + * and closed-loop mode, or vice versa. + */ +static void dfll_set_mode(struct tegra_dfll *td, + enum dfll_ctrl_mode mode) +{ + td->mode = mode; + dfll_writel(td, mode - 1, DFLL_CTRL); + dfll_wmb(td); +} + +/* + * DFLL-to-I2C controller interface + */ + +/** + * dfll_i2c_set_output_enabled - enable/disable I2C PMIC voltage requests + * @td: DFLL instance + * @enable: whether to enable or disable the I2C voltage requests + * + * Set the master enable control for I2C control value updates. If disabled, + * then I2C control messages are inhibited, regardless of the DFLL mode. + */ +static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable) +{ + u32 val; + + val = dfll_i2c_readl(td, DFLL_OUTPUT_CFG); + + if (enable) + val |= DFLL_OUTPUT_CFG_I2C_ENABLE; + else + val &= ~DFLL_OUTPUT_CFG_I2C_ENABLE; + + dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG); + dfll_i2c_wmb(td); + + return 0; +} + +/** + * dfll_load_i2c_lut - load the I2C voltage lookup table + * @td: DFLL instance + * + * Load the voltage-to-PMIC register value lookup table into the DFLL + * IP block memory. Look-up tables can be loaded at any time. + */ +static void dfll_load_i2c_lut(struct tegra_dfll *td) +{ + int i, lut_index; + u32 val; + + for (i = 0; i < MAX_DFLL_VOLTAGES; i++) { + if (i < td->lut_min) + lut_index = td->lut_min; + else if (i > td->lut_max) + lut_index = td->lut_max; + else + lut_index = i; + + val = regulator_list_hardware_vsel(td->vdd_reg, + td->i2c_lut[lut_index]); + __raw_writel(val, td->lut_base + i * 4); + } + + dfll_i2c_wmb(td); +} + +/** + * dfll_init_i2c_if - set up the DFLL's DFLL-I2C interface + * @td: DFLL instance + * + * During DFLL driver initialization, program the DFLL-I2C interface + * with the PMU slave address, vdd register offset, and transfer mode. + * This data is used by the DFLL to automatically construct I2C + * voltage-set commands, which are then passed to the DFLL's internal + * I2C controller.
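For concreteness, the full-speed divisor arithmetic in dfll_init_i2c_if() below, with assumed example rates (a 51 MHz i2c_clk and a 400 kHz full-speed bus; neither value comes from this patch):

	/*
	 *   DIV_ROUND_UP(51000000, 400000 * 8) = DIV_ROUND_UP(51000000, 3200000)
	 *                                      = 16
	 *
	 * and 16 - 1 = 15 is the value programmed into the FS field of
	 * DFLL_I2C_CLK_DIVISOR (the register holds divisor - 1).
	 */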
+ */ +static void dfll_init_i2c_if(struct tegra_dfll *td) +{ + u32 val; + + if (td->i2c_slave_addr > 0x7f) { + val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT; + val |= DFLL_I2C_CFG_SLAVE_ADDR_10; + } else { + val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT; + } + val |= DFLL_I2C_CFG_SIZE_MASK; + val |= DFLL_I2C_CFG_ARB_ENABLE; + dfll_i2c_writel(td, val, DFLL_I2C_CFG); + + dfll_i2c_writel(td, td->i2c_reg, DFLL_I2C_VDD_REG_ADDR); + + val = DIV_ROUND_UP(td->i2c_clk_rate, td->i2c_fs_rate * 8); + BUG_ON(!val || (val > DFLL_I2C_CLK_DIVISOR_MASK)); + val = (val - 1) << DFLL_I2C_CLK_DIVISOR_FS_SHIFT; + + /* default hs divisor just in case */ + val |= 1 << DFLL_I2C_CLK_DIVISOR_HS_SHIFT; + __raw_writel(val, td->i2c_controller_base + DFLL_I2C_CLK_DIVISOR); + dfll_i2c_wmb(td); +} + +/** + * dfll_init_out_if - prepare DFLL-to-PMIC interface + * @td: DFLL instance + * + * During DFLL driver initialization or resume from context loss, + * disable the I2C command output to the PMIC, set safe voltage and + * output limits, and disable and clear limit interrupts. + */ +static void dfll_init_out_if(struct tegra_dfll *td) +{ + u32 val; + + td->lut_min = 0; + td->lut_max = td->i2c_lut_size - 1; + td->lut_safe = td->lut_min + 1; + + dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG); + val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) | + (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) | + (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT); + dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG); + dfll_i2c_wmb(td); + + dfll_writel(td, 0, DFLL_OUTPUT_FORCE); + dfll_i2c_writel(td, 0, DFLL_INTR_EN); + dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK, + DFLL_INTR_STS); + + dfll_load_i2c_lut(td); + dfll_init_i2c_if(td); +} + +/* + * Set/get the DFLL's targeted output clock rate + */ + +/** + * find_lut_index_for_rate - determine I2C LUT index for given DFLL rate + * @td: DFLL instance + * @rate: clock rate + * + * Determines the index of an I2C LUT entry for a voltage that approximately + * produces the given DFLL clock rate. This is used when forcing a value + * to the integrator during rate changes. Returns -ENOENT if a suitable + * LUT index is not found. + */ +static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate) +{ + struct dev_pm_opp *opp; + int i, uv; + + rcu_read_lock(); + + opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); + if (IS_ERR(opp)) { + rcu_read_unlock(); + return PTR_ERR(opp); + } + uv = dev_pm_opp_get_voltage(opp); + + rcu_read_unlock(); + + for (i = 0; i < td->i2c_lut_size; i++) { + if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) + return i; + } + + return -ENOENT; +} + +/** + * dfll_calculate_rate_request - calculate DFLL parameters for a given rate + * @td: DFLL instance + * @req: DFLL-rate-request structure + * @rate: the desired DFLL rate + * + * Populate the DFLL-rate-request record @req fields with the scale_bits + * and mult_bits fields, based on the target input rate. Returns 0 upon + * success, or -EINVAL if the requested rate @rate is too high + * or low for the DFLL to generate. + */ +static int dfll_calculate_rate_request(struct tegra_dfll *td, + struct dfll_rate_req *req, + unsigned long rate) +{ + u32 val; + + /* + * If the requested rate is below the minimum DVCO rate, activate the + * scaler. In the future the DVCO minimum voltage should be selected + * based on chip temperature and the actual minimum rate should be + * calibrated at runtime.
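Worked numbers for the scaler logic that follows (dvco_rate_min = 408 MHz is an assumed example, not a value from this patch):

	/*
	 * Requesting 204 MHz with dvco_rate_min = 408 MHz and the 51 MHz
	 * reference (one MULT step = 25.5 MHz):
	 *
	 *   scale = DIV_ROUND_CLOSEST(204000 * 256, 408000) = 128
	 *   req->scale_bits = 127          (a 128/256 = 1/2 postscale)
	 *   rate is then raised to 408 MHz, so
	 *   req->mult_bits = 408 MHz / 25.5 MHz = 16
	 *   req->rate = 408 MHz * 128/256 = 204 MHz, as requested
	 */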
+ */ + req->scale_bits = DFLL_FREQ_REQ_SCALE_MAX - 1; + if (rate < td->dvco_rate_min) { + int scale; + + scale = DIV_ROUND_CLOSEST(rate / 1000 * DFLL_FREQ_REQ_SCALE_MAX, + td->dvco_rate_min / 1000); + if (!scale) { + dev_err(td->dev, "%s: Rate %lu is too low\n", + __func__, rate); + return -EINVAL; + } + req->scale_bits = scale - 1; + rate = td->dvco_rate_min; + } + + /* Convert requested rate into frequency request and scale settings */ + val = DVCO_RATE_TO_MULT(rate, td->ref_rate); + if (val > FREQ_MAX) { + dev_err(td->dev, "%s: Rate %lu is above dfll range\n", + __func__, rate); + return -EINVAL; + } + req->mult_bits = val; + req->dvco_target_rate = MULT_TO_DVCO_RATE(req->mult_bits, td->ref_rate); + req->rate = dfll_scale_dvco_rate(req->scale_bits, + req->dvco_target_rate); + req->lut_index = find_lut_index_for_rate(td, req->dvco_target_rate); + if (req->lut_index < 0) + return req->lut_index; + + return 0; +} + +/** + * dfll_set_frequency_request - start the frequency change operation + * @td: DFLL instance + * @req: rate request structure + * + * Tell the DFLL to try to change its output frequency to the + * frequency represented by @req. DFLL must be in closed-loop mode. + */ +static void dfll_set_frequency_request(struct tegra_dfll *td, + struct dfll_rate_req *req) +{ + u32 val = 0; + int force_val; + int coef = 128; /* FIXME: td->cg_scale? */ + + force_val = (req->lut_index - td->lut_safe) * coef / td->cg; + force_val = clamp(force_val, FORCE_MIN, FORCE_MAX); + + val |= req->mult_bits << DFLL_FREQ_REQ_MULT_SHIFT; + val |= req->scale_bits << DFLL_FREQ_REQ_SCALE_SHIFT; + val |= ((u32)force_val << DFLL_FREQ_REQ_FORCE_SHIFT) & + DFLL_FREQ_REQ_FORCE_MASK; + val |= DFLL_FREQ_REQ_FREQ_VALID | DFLL_FREQ_REQ_FORCE_ENABLE; + + dfll_writel(td, val, DFLL_FREQ_REQ); + dfll_wmb(td); +} + +/** + * dfll_request_rate - set the next rate for the DFLL to tune to + * @td: DFLL instance + * @rate: clock rate to target + * + * Convert the requested clock rate @rate into the DFLL control logic + * settings. In closed-loop mode, update the new settings immediately to + * adjust the DFLL output rate accordingly. Otherwise, just save them + * until the next switch to closed-loop mode. Returns 0 upon success, + * -EPERM if the DFLL driver has not yet been initialized, or -EINVAL + * if @rate is outside the DFLL's tunable range. + */ +static int dfll_request_rate(struct tegra_dfll *td, unsigned long rate) +{ + int ret; + struct dfll_rate_req req; + + if (td->mode == DFLL_UNINITIALIZED) { + dev_err(td->dev, "%s: Cannot set DFLL rate in %s mode\n", + __func__, mode_name[td->mode]); + return -EPERM; + } + + ret = dfll_calculate_rate_request(td, &req, rate); + if (ret) + return ret; + + td->last_unrounded_rate = rate; + td->last_req = req; + + if (td->mode == DFLL_CLOSED_LOOP) + dfll_set_frequency_request(td, &td->last_req); + + return 0; +} + +/* + * DFLL enable/disable & open-loop <-> closed-loop transitions + */ + +/** + * dfll_disable - switch from open-loop mode to disabled mode + * @td: DFLL instance + * + * Switch from OPEN_LOOP state to DISABLED state. Returns 0 upon success + * or -EINVAL if the DFLL is not currently in open-loop mode.
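+ *
+ * Together with dfll_enable(), dfll_lock() and dfll_unlock() below,
+ * the legal transitions form a simple chain, so a full caller
+ * sequence looks like (sketch, error handling omitted):
+ *
+ *   dfll_enable(td);    DISABLED    -> OPEN_LOOP
+ *   dfll_lock(td);      OPEN_LOOP   -> CLOSED_LOOP
+ *   dfll_unlock(td);    CLOSED_LOOP -> OPEN_LOOP
+ *   dfll_disable(td);   OPEN_LOOP   -> DISABLED
+ *
+ * Each call is only valid from the state on its left.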
+ */ +static int dfll_disable(struct tegra_dfll *td) +{ + if (td->mode != DFLL_OPEN_LOOP) { + dev_err(td->dev, "cannot disable DFLL in %s mode\n", + mode_name[td->mode]); + return -EINVAL; + } + + dfll_set_mode(td, DFLL_DISABLED); + pm_runtime_put_sync(td->dev); + + return 0; +} + +/** + * dfll_enable - switch a disabled DFLL to open-loop mode + * @td: DFLL instance + * + * Switch from DISABLED state to OPEN_LOOP state. Returns 0 upon success + * or -EPERM if the DFLL is not currently disabled. + */ +static int dfll_enable(struct tegra_dfll *td) +{ + if (td->mode != DFLL_DISABLED) { + dev_err(td->dev, "cannot enable DFLL in %s mode\n", + mode_name[td->mode]); + return -EPERM; + } + + pm_runtime_get_sync(td->dev); + dfll_set_mode(td, DFLL_OPEN_LOOP); + + return 0; +} + +/** + * dfll_set_open_loop_config - prepare to switch to open-loop mode + * @td: DFLL instance + * + * Prepare to switch the DFLL to open-loop mode. This switches the + * DFLL to the low-voltage tuning range, ensures that I2C output + * forcing is disabled, and disables the output clock rate scaler. + * The DFLL's low-voltage tuning range parameters must be + * characterized to keep the downstream device stable at any DVCO + * input voltage. No return value. + */ +static void dfll_set_open_loop_config(struct tegra_dfll *td) +{ + u32 val; + + /* always tune low (safe) in open loop */ + if (td->tune_range != DFLL_TUNE_LOW) + dfll_tune_low(td); + + val = dfll_readl(td, DFLL_FREQ_REQ); + val |= DFLL_FREQ_REQ_SCALE_MASK; + val &= ~DFLL_FREQ_REQ_FORCE_ENABLE; + dfll_writel(td, val, DFLL_FREQ_REQ); + dfll_wmb(td); +} + +/** + * dfll_lock - switch from open-loop to closed-loop mode + * @td: DFLL instance + * + * Switch from OPEN_LOOP state to CLOSED_LOOP state. Returns 0 upon success, + * -EINVAL if the DFLL's target rate hasn't been set yet, or -EPERM if the + * DFLL is not currently in open-loop mode. + */ +static int dfll_lock(struct tegra_dfll *td) +{ + struct dfll_rate_req *req = &td->last_req; + + switch (td->mode) { + case DFLL_CLOSED_LOOP: + return 0; + + case DFLL_OPEN_LOOP: + if (req->rate == 0) { + dev_err(td->dev, "%s: Cannot lock DFLL at rate 0\n", + __func__); + return -EINVAL; + } + + dfll_i2c_set_output_enabled(td, true); + dfll_set_mode(td, DFLL_CLOSED_LOOP); + dfll_set_frequency_request(td, req); + return 0; + + default: + BUG_ON(td->mode > DFLL_CLOSED_LOOP); + dev_err(td->dev, "%s: Cannot lock DFLL in %s mode\n", + __func__, mode_name[td->mode]); + return -EPERM; + } +} + +/** + * dfll_unlock - switch from closed-loop to open-loop mode + * @td: DFLL instance + * + * Switch from CLOSED_LOOP state to OPEN_LOOP state. Returns 0 upon success, + * or -EPERM if the DFLL is currently disabled or uninitialized. + */ +static int dfll_unlock(struct tegra_dfll *td) +{ + switch (td->mode) { + case DFLL_CLOSED_LOOP: + dfll_set_open_loop_config(td); + dfll_set_mode(td, DFLL_OPEN_LOOP); + dfll_i2c_set_output_enabled(td, false); + return 0; + + case DFLL_OPEN_LOOP: + return 0; + + default: + BUG_ON(td->mode > DFLL_CLOSED_LOOP); + dev_err(td->dev, "%s: Cannot unlock DFLL in %s mode\n", + __func__, mode_name[td->mode]); + return -EPERM; + } +} + +/* + * Clock framework integration + * + * When the DFLL is being controlled by the CCF, always enter closed loop + * mode when the clk is enabled. This requires that a DFLL rate request + * has been set beforehand, which implies that a clk_set_rate() call is + * always required before a clk_enable().
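+ *
+ * A consumer of this clock therefore looks roughly like the sketch
+ * below (the lookup name matches the "dfllCPU_out" parent entry used
+ * elsewhere in this patch; exact consumer code will differ):
+ *
+ *   clk = clk_get(dev, "dfllCPU_out");
+ *   clk_set_rate(clk, 1020000000);
+ *   clk_prepare_enable(clk);
+ *
+ * The clk_set_rate() call seeds td->last_req, which dfll_clk_enable()
+ * then applies via dfll_lock() when the loop is closed.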
+ */ + +static int dfll_clk_is_enabled(struct clk_hw *hw) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + + return dfll_is_running(td); +} + +static int dfll_clk_enable(struct clk_hw *hw) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + int ret; + + ret = dfll_enable(td); + if (ret) + return ret; + + ret = dfll_lock(td); + if (ret) + dfll_disable(td); + + return ret; +} + +static void dfll_clk_disable(struct clk_hw *hw) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + int ret; + + ret = dfll_unlock(td); + if (!ret) + dfll_disable(td); +} + +static unsigned long dfll_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + + return td->last_unrounded_rate; +} + +static long dfll_clk_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + struct dfll_rate_req req; + int ret; + + ret = dfll_calculate_rate_request(td, &req, rate); + if (ret) + return ret; + + /* + * Don't return the rounded rate, since it doesn't really matter as + * the output rate will be voltage controlled anyway, and cpufreq + * freaks out if any rounding happens. + */ + return rate; +} + +static int dfll_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct tegra_dfll *td = clk_hw_to_dfll(hw); + + return dfll_request_rate(td, rate); +} + +static const struct clk_ops dfll_clk_ops = { + .is_enabled = dfll_clk_is_enabled, + .enable = dfll_clk_enable, + .disable = dfll_clk_disable, + .recalc_rate = dfll_clk_recalc_rate, + .round_rate = dfll_clk_round_rate, + .set_rate = dfll_clk_set_rate, +}; + +static struct clk_init_data dfll_clk_init_data = { + .flags = CLK_IS_ROOT, + .ops = &dfll_clk_ops, + .num_parents = 0, +}; + +/** + * dfll_register_clk - register the DFLL output clock with the clock framework + * @td: DFLL instance + * + * Register the DFLL's output clock with the Linux clock framework and register + * the DFLL driver as an OF clock provider. Returns 0 upon success or -EINVAL + * or -ENOMEM upon failure. + */ +static int dfll_register_clk(struct tegra_dfll *td) +{ + int ret; + + dfll_clk_init_data.name = td->output_clock_name; + td->dfll_clk_hw.init = &dfll_clk_init_data; + + td->dfll_clk = clk_register(td->dev, &td->dfll_clk_hw); + if (IS_ERR(td->dfll_clk)) { + dev_err(td->dev, "DFLL clock registration error\n"); + return -EINVAL; + } + + ret = of_clk_add_provider(td->dev->of_node, of_clk_src_simple_get, + td->dfll_clk); + if (ret) { + dev_err(td->dev, "of_clk_add_provider() failed\n"); + + clk_unregister(td->dfll_clk); + return ret; + } + + return 0; +} + +/** + * dfll_unregister_clk - unregister the DFLL output clock + * @td: DFLL instance + * + * Unregister the DFLL's output clock from the Linux clock framework + * and from clkdev. No return value. + */ +static void dfll_unregister_clk(struct tegra_dfll *td) +{ + of_clk_del_provider(td->dev->of_node); + clk_unregister(td->dfll_clk); + td->dfll_clk = NULL; +} + +/* + * Debugfs interface + */ + +#ifdef CONFIG_DEBUG_FS + +static int attr_enable_get(void *data, u64 *val) +{ + struct tegra_dfll *td = data; + + *val = dfll_is_running(td); + + return 0; +} +static int attr_enable_set(void *data, u64 val) +{ + struct tegra_dfll *td = data; + + return val ? 
dfll_enable(td) : dfll_disable(td); +} +DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set, + "%llu\n"); + +static int attr_lock_get(void *data, u64 *val) +{ + struct tegra_dfll *td = data; + + *val = (td->mode == DFLL_CLOSED_LOOP); + + return 0; +} +static int attr_lock_set(void *data, u64 val) +{ + struct tegra_dfll *td = data; + + return val ? dfll_lock(td) : dfll_unlock(td); +} +DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, + "%llu\n"); + +static int attr_rate_get(void *data, u64 *val) +{ + struct tegra_dfll *td = data; + + *val = dfll_read_monitor_rate(td); + + return 0; +} + +static int attr_rate_set(void *data, u64 val) +{ + struct tegra_dfll *td = data; + + return dfll_request_rate(td, val); +} +DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n"); + +static int attr_registers_show(struct seq_file *s, void *data) +{ + u32 val, offs; + struct tegra_dfll *td = s->private; + + seq_puts(s, "CONTROL REGISTERS:\n"); + for (offs = 0; offs <= DFLL_MONITOR_DATA; offs += 4) { + if (offs == DFLL_OUTPUT_CFG) + val = dfll_i2c_readl(td, offs); + else + val = dfll_readl(td, offs); + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, val); + } + + seq_puts(s, "\nI2C and INTR REGISTERS:\n"); + for (offs = DFLL_I2C_CFG; offs <= DFLL_I2C_STS; offs += 4) + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, + dfll_i2c_readl(td, offs)); + for (offs = DFLL_INTR_STS; offs <= DFLL_INTR_EN; offs += 4) + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, + dfll_i2c_readl(td, offs)); + + seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n"); + offs = DFLL_I2C_CLK_DIVISOR; + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, + __raw_readl(td->i2c_controller_base + offs)); + + seq_puts(s, "\nLUT:\n"); + for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4) + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, + __raw_readl(td->lut_base + offs)); + + return 0; +} + +static int attr_registers_open(struct inode *inode, struct file *file) +{ + return single_open(file, attr_registers_show, inode->i_private); +} + +static const struct file_operations attr_registers_fops = { + .open = attr_registers_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dfll_debug_init(struct tegra_dfll *td) +{ + int ret; + + if (!td || (td->mode == DFLL_UNINITIALIZED)) + return 0; + + td->debugfs_dir = debugfs_create_dir("tegra_dfll_fcpu", NULL); + if (!td->debugfs_dir) + return -ENOMEM; + + ret = -ENOMEM; + + if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, + td->debugfs_dir, td, &enable_fops)) + goto err_out; + + if (!debugfs_create_file("lock", S_IRUGO, + td->debugfs_dir, td, &lock_fops)) + goto err_out; + + if (!debugfs_create_file("rate", S_IRUGO, + td->debugfs_dir, td, &rate_fops)) + goto err_out; + + if (!debugfs_create_file("registers", S_IRUGO, + td->debugfs_dir, td, &attr_registers_fops)) + goto err_out; + + return 0; + +err_out: + debugfs_remove_recursive(td->debugfs_dir); + return ret; +} + +#endif /* CONFIG_DEBUG_FS */ + +/* + * DFLL initialization + */ + +/** + * dfll_set_default_params - program non-output related DFLL parameters + * @td: DFLL instance + * + * During DFLL driver initialization or resume from context loss, + * program parameters for the closed loop integrator, DVCO tuning, + * voltage droop control and monitor control. 
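+ *
+ * For example (hypothetical values), with a 51 MHz reference clock
+ * and nvidia,sample-rate = <12500> in the device tree, the
+ * DFLL_CONFIG divider programmed below evaluates to
+ *
+ *   DIV_ROUND_UP(51000000, 12500 * 32) = 128
+ *
+ * and the BUG_ON() guards against the result overflowing
+ * DFLL_CONFIG_DIV_MASK.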
+ */ +static void dfll_set_default_params(struct tegra_dfll *td) +{ + u32 val; + + val = DIV_ROUND_UP(td->ref_rate, td->sample_rate * 32); + BUG_ON(val > DFLL_CONFIG_DIV_MASK); + dfll_writel(td, val, DFLL_CONFIG); + + val = (td->force_mode << DFLL_PARAMS_FORCE_MODE_SHIFT) | + (td->cf << DFLL_PARAMS_CF_PARAM_SHIFT) | + (td->ci << DFLL_PARAMS_CI_PARAM_SHIFT) | + (td->cg << DFLL_PARAMS_CG_PARAM_SHIFT) | + (td->cg_scale ? DFLL_PARAMS_CG_SCALE : 0); + dfll_writel(td, val, DFLL_PARAMS); + + dfll_tune_low(td); + dfll_writel(td, td->droop_ctrl, DFLL_DROOP_CTRL); + dfll_writel(td, DFLL_MONITOR_CTRL_FREQ, DFLL_MONITOR_CTRL); +} + +/** + * dfll_init_clks - devm_clk_get() the DFLL source clocks + * @td: DFLL instance + * + * Call devm_clk_get() on the DFLL source clocks and save the pointers for + * later use. Returns 0 upon success or error (see devm_clk_get) if one or + * more of the clocks couldn't be looked up. + */ +static int dfll_init_clks(struct tegra_dfll *td) +{ + td->ref_clk = devm_clk_get(td->dev, "ref"); + if (IS_ERR(td->ref_clk)) { + dev_err(td->dev, "missing ref clock\n"); + return PTR_ERR(td->ref_clk); + } + + td->soc_clk = devm_clk_get(td->dev, "soc"); + if (IS_ERR(td->soc_clk)) { + dev_err(td->dev, "missing soc clock\n"); + return PTR_ERR(td->soc_clk); + } + + td->i2c_clk = devm_clk_get(td->dev, "i2c"); + if (IS_ERR(td->i2c_clk)) { + dev_err(td->dev, "missing i2c clock\n"); + return PTR_ERR(td->i2c_clk); + } + td->i2c_clk_rate = clk_get_rate(td->i2c_clk); + + return 0; +} + +/** + * dfll_init - Prepare the DFLL IP block for use + * @td: DFLL instance + * + * Do everything necessary to prepare the DFLL IP block for use. The + * DFLL will be left in DISABLED state. Called by tegra_dfll_register(). + * Returns 0 upon success, or passes along the error from whatever + * function returned it. + */ +static int dfll_init(struct tegra_dfll *td) +{ + int ret; + + td->ref_rate = clk_get_rate(td->ref_clk); + if (td->ref_rate != REF_CLOCK_RATE) { + dev_err(td->dev, "unexpected ref clk rate %lu, expecting %lu\n", + td->ref_rate, REF_CLOCK_RATE); + return -EINVAL; + } + + reset_control_deassert(td->dvco_rst); + + ret = clk_prepare(td->ref_clk); + if (ret) { + dev_err(td->dev, "failed to prepare ref_clk\n"); + return ret; + } + + ret = clk_prepare(td->soc_clk); + if (ret) { + dev_err(td->dev, "failed to prepare soc_clk\n"); + goto di_err1; + } + + ret = clk_prepare(td->i2c_clk); + if (ret) { + dev_err(td->dev, "failed to prepare i2c_clk\n"); + goto di_err2; + } + + td->last_unrounded_rate = 0; + + pm_runtime_enable(td->dev); + pm_runtime_get_sync(td->dev); + + dfll_set_mode(td, DFLL_DISABLED); + dfll_set_default_params(td); + + if (td->soc->init_clock_trimmers) + td->soc->init_clock_trimmers(); + + dfll_set_open_loop_config(td); + + dfll_init_out_if(td); + + pm_runtime_put_sync(td->dev); + + return 0; + +di_err2: + clk_unprepare(td->soc_clk); +di_err1: + clk_unprepare(td->ref_clk); + + reset_control_assert(td->dvco_rst); + + return ret; +} + +/* + * DT data fetch + */ + +/* + * Find a PMIC voltage register-to-voltage mapping for the given voltage. + * An exact voltage match is required.
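+ *
+ * For example, with a hypothetical regulator ladder in 10 mV steps
+ * starting at 700 mV, find_vdd_map_entry_exact(td, 905000) fails
+ * with -EINVAL, whereas find_vdd_map_entry_min() further below
+ * returns the selector for 910 mV, the lowest supported voltage at
+ * or above the request.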
+ */ +static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV) +{ + int i, n_voltages, reg_uV; + + n_voltages = regulator_count_voltages(td->vdd_reg); + for (i = 0; i < n_voltages; i++) { + reg_uV = regulator_list_voltage(td->vdd_reg, i); + if (reg_uV < 0) + break; + + if (uV == reg_uV) + return i; + } + + dev_err(td->dev, "no voltage map entry for %d uV\n", uV); + return -EINVAL; +} + +/* + * Find a PMIC voltage register-to-voltage mapping for the given voltage, + * rounding up to the closest supported voltage. + */ +static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV) +{ + int i, n_voltages, reg_uV; + + n_voltages = regulator_count_voltages(td->vdd_reg); + for (i = 0; i < n_voltages; i++) { + reg_uV = regulator_list_voltage(td->vdd_reg, i); + if (reg_uV < 0) + break; + + if (uV <= reg_uV) + return i; + } + + dev_err(td->dev, "no voltage map entry rounding to %d uV\n", uV); + return -EINVAL; +} + +/** + * dfll_build_i2c_lut - build the I2C voltage register lookup table + * @td: DFLL instance + * + * The DFLL hardware has 33 bytes of look-up table RAM that must be filled with + * PMIC voltage register values that span the entire DFLL operating range. + * This function builds the look-up table based on the OPP table provided by + * the SoC-specific platform driver (td->soc->dev) and the PMIC + * register-to-voltage mapping queried from the regulator framework. + * + * On success, fills in td->i2c_lut and returns 0, or -err on failure. + */ +static int dfll_build_i2c_lut(struct tegra_dfll *td) +{ + int ret = -EINVAL; + int j, v, v_max, v_opp; + int selector; + unsigned long rate; + struct dev_pm_opp *opp; + int lut; + + rcu_read_lock(); + + rate = ULONG_MAX; + opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate); + if (IS_ERR(opp)) { + dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n"); + goto out; + } + v_max = dev_pm_opp_get_voltage(opp); + + v = td->soc->min_millivolts * 1000; + lut = find_vdd_map_entry_exact(td, v); + if (lut < 0) + goto out; + td->i2c_lut[0] = lut; + + for (j = 1, rate = 0; ; rate++) { + opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); + if (IS_ERR(opp)) + break; + v_opp = dev_pm_opp_get_voltage(opp); + + if (v_opp <= td->soc->min_millivolts * 1000) + td->dvco_rate_min = dev_pm_opp_get_freq(opp); + + for (;;) { + v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j)); + if (v >= v_opp) + break; + + selector = find_vdd_map_entry_min(td, v); + if (selector < 0) + goto out; + if (selector != td->i2c_lut[j - 1]) + td->i2c_lut[j++] = selector; + } + + v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp; + selector = find_vdd_map_entry_exact(td, v); + if (selector < 0) + goto out; + if (selector != td->i2c_lut[j - 1]) + td->i2c_lut[j++] = selector; + + if (v >= v_max) + break; + } + td->i2c_lut_size = j; + + if (!td->dvco_rate_min) + dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n", + td->soc->min_millivolts); + else + ret = 0; + +out: + rcu_read_unlock(); + + return ret; +} + +/** + * read_dt_param - helper function for reading required parameters from the DT + * @td: DFLL instance + * @param: DT property name + * @dest: output pointer for the value read + * + * Read a required numeric parameter from the DFLL device node, or complain + * if the property doesn't exist. Returns a boolean indicating success for + * easy chaining of multiple calls to this function.
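+ *
+ * The boolean return value lets call sites accumulate failures
+ * instead of bailing out on the first one, e.g.:
+ *
+ *   ok &= read_dt_param(td, "nvidia,sample-rate", &td->sample_rate);
+ *   ok &= read_dt_param(td, "nvidia,cf", &td->cf);
+ *
+ * as dfll_fetch_common_params() below does, so every missing
+ * property is reported before the function returns -EINVAL.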
+ */ +static bool read_dt_param(struct tegra_dfll *td, const char *param, u32 *dest) +{ + int err = of_property_read_u32(td->dev->of_node, param, dest); + + if (err < 0) { + dev_err(td->dev, "failed to read DT parameter %s: %d\n", + param, err); + return false; + } + + return true; +} + +/** + * dfll_fetch_i2c_params - query PMIC I2C params from DT & regulator subsystem + * @td: DFLL instance + * + * Read all the parameters required for operation in I2C mode. The parameters + * can originate from the device tree or the regulator subsystem. + * Returns 0 on success or -err on failure. + */ +static int dfll_fetch_i2c_params(struct tegra_dfll *td) +{ + struct regmap *regmap; + struct device *i2c_dev; + struct i2c_client *i2c_client; + int vsel_reg, vsel_mask; + int ret; + + if (!read_dt_param(td, "nvidia,i2c-fs-rate", &td->i2c_fs_rate)) + return -EINVAL; + + regmap = regulator_get_regmap(td->vdd_reg); + i2c_dev = regmap_get_device(regmap); + i2c_client = to_i2c_client(i2c_dev); + + td->i2c_slave_addr = i2c_client->addr; + + ret = regulator_get_hardware_vsel_register(td->vdd_reg, + &vsel_reg, + &vsel_mask); + if (ret < 0) { + dev_err(td->dev, + "regulator unsuitable for DFLL I2C operation\n"); + return -EINVAL; + } + td->i2c_reg = vsel_reg; + + ret = dfll_build_i2c_lut(td); + if (ret) { + dev_err(td->dev, "couldn't build I2C LUT\n"); + return ret; + } + + return 0; +} + +/** + * dfll_fetch_common_params - read DFLL parameters from the device tree + * @td: DFLL instance + * + * Read all the DT parameters that are common to both I2C and PWM operation. + * Returns 0 on success or -EINVAL on any failure. + */ +static int dfll_fetch_common_params(struct tegra_dfll *td) +{ + bool ok = true; + + ok &= read_dt_param(td, "nvidia,droop-ctrl", &td->droop_ctrl); + ok &= read_dt_param(td, "nvidia,sample-rate", &td->sample_rate); + ok &= read_dt_param(td, "nvidia,force-mode", &td->force_mode); + ok &= read_dt_param(td, "nvidia,cf", &td->cf); + ok &= read_dt_param(td, "nvidia,ci", &td->ci); + ok &= read_dt_param(td, "nvidia,cg", &td->cg); + td->cg_scale = of_property_read_bool(td->dev->of_node, + "nvidia,cg-scale"); + + if (of_property_read_string(td->dev->of_node, "clock-output-names", + &td->output_clock_name)) { + dev_err(td->dev, "missing clock-output-names property\n"); + ok = false; + } + + return ok ? 0 : -EINVAL; +} + +/* + * API exported to per-SoC platform drivers + */ + +/** + * tegra_dfll_register - probe a Tegra DFLL device + * @pdev: DFLL platform_device * + * @soc: Per-SoC integration and characterization data for this DFLL instance + * + * Probe and initialize a DFLL device instance. Intended to be called + * by a SoC-specific shim driver that passes in per-SoC integration + * and configuration data via @soc. Returns 0 on success or -err on failure. 
+ */ +int tegra_dfll_register(struct platform_device *pdev, + struct tegra_dfll_soc_data *soc) +{ + struct resource *mem; + struct tegra_dfll *td; + int ret; + + if (!soc) { + dev_err(&pdev->dev, "no tegra_dfll_soc_data provided\n"); + return -EINVAL; + } + + td = devm_kzalloc(&pdev->dev, sizeof(*td), GFP_KERNEL); + if (!td) + return -ENOMEM; + td->dev = &pdev->dev; + platform_set_drvdata(pdev, td); + + td->soc = soc; + + td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu"); + if (IS_ERR(td->vdd_reg)) { + dev_err(td->dev, "couldn't get vdd_cpu regulator\n"); + return PTR_ERR(td->vdd_reg); + } + + td->dvco_rst = devm_reset_control_get(td->dev, "dvco"); + if (IS_ERR(td->dvco_rst)) { + dev_err(td->dev, "couldn't get dvco reset\n"); + return PTR_ERR(td->dvco_rst); + } + + ret = dfll_fetch_common_params(td); + if (ret) { + dev_err(td->dev, "couldn't parse device tree parameters\n"); + return ret; + } + + ret = dfll_fetch_i2c_params(td); + if (ret) + return ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(td->dev, "no control register resource\n"); + return -ENODEV; + } + + td->base = devm_ioremap(td->dev, mem->start, resource_size(mem)); + if (!td->base) { + dev_err(td->dev, "couldn't ioremap DFLL control registers\n"); + return -ENODEV; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem) { + dev_err(td->dev, "no i2c_base resource\n"); + return -ENODEV; + } + + td->i2c_base = devm_ioremap(td->dev, mem->start, resource_size(mem)); + if (!td->i2c_base) { + dev_err(td->dev, "couldn't ioremap i2c_base resource\n"); + return -ENODEV; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!mem) { + dev_err(td->dev, "no i2c_controller_base resource\n"); + return -ENODEV; + } + + td->i2c_controller_base = devm_ioremap(td->dev, mem->start, + resource_size(mem)); + if (!td->i2c_controller_base) { + dev_err(td->dev, + "couldn't ioremap i2c_controller_base resource\n"); + return -ENODEV; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 3); + if (!mem) { + dev_err(td->dev, "no lut_base resource\n"); + return -ENODEV; + } + + td->lut_base = devm_ioremap(td->dev, mem->start, resource_size(mem)); + if (!td->lut_base) { + dev_err(td->dev, + "couldn't ioremap lut_base resource\n"); + return -ENODEV; + } + + ret = dfll_init_clks(td); + if (ret) { + dev_err(&pdev->dev, "DFLL clock init error\n"); + return ret; + } + + /* Enable the clocks and set the device up */ + ret = dfll_init(td); + if (ret) + return ret; + + ret = dfll_register_clk(td); + if (ret) { + dev_err(&pdev->dev, "DFLL clk registration failed\n"); + return ret; + } + +#ifdef CONFIG_DEBUG_FS + dfll_debug_init(td); +#endif + + return 0; +} +EXPORT_SYMBOL(tegra_dfll_register); + +/** + * tegra_dfll_unregister - release all of the DFLL driver resources for a device + * @pdev: DFLL platform_device * + * + * Unbind this driver from the DFLL hardware device represented by + * @pdev. The DFLL must be disabled for this to succeed. Returns 0 + * upon success or -EBUSY if the DFLL is still active. 
+ */ +int tegra_dfll_unregister(struct platform_device *pdev) +{ + struct tegra_dfll *td = platform_get_drvdata(pdev); + + /* Try to prevent removal while the DFLL is active */ + if (td->mode != DFLL_DISABLED) { + dev_err(&pdev->dev, + "must disable DFLL before removing driver\n"); + return -EBUSY; + } + + debugfs_remove_recursive(td->debugfs_dir); + + dfll_unregister_clk(td); + pm_runtime_disable(&pdev->dev); + + clk_unprepare(td->ref_clk); + clk_unprepare(td->soc_clk); + clk_unprepare(td->i2c_clk); + + reset_control_assert(td->dvco_rst); + + return 0; +} +EXPORT_SYMBOL(tegra_dfll_unregister); diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h new file mode 100644 index 000000000..2e4c0772a --- /dev/null +++ b/drivers/clk/tegra/clk-dfll.h @@ -0,0 +1,54 @@ +/* + * clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver + * Copyright (C) 2013 NVIDIA Corporation. All rights reserved. + * + * Aleksandr Frid + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __DRIVERS_CLK_TEGRA_CLK_DFLL_H +#define __DRIVERS_CLK_TEGRA_CLK_DFLL_H + +#include +#include +#include + +/** + * struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver + * @dev: struct device * that holds the OPP table for the DFLL + * @min_millivolts: minimum voltage (in mV) at which the DFLL can operate + * @tune0_low: DFLL tuning register 0 (low voltage range) + * @tune0_high: DFLL tuning register 0 (high voltage range) + * @tune1: DFLL tuning register 1 + * @init_clock_trimmers: fn ptr to initialize the clock trimmers + * @set_clock_trimmers_high: fn ptr to tune clock trimmers for high voltage + * @set_clock_trimmers_low: fn ptr to tune clock trimmers for low voltage + */ +struct tegra_dfll_soc_data { + struct device *dev; + unsigned int min_millivolts; + u32 tune0_low; + u32 tune0_high; + u32 tune1; + void (*init_clock_trimmers)(void); + void (*set_clock_trimmers_high)(void); + void (*set_clock_trimmers_low)(void); +}; + +int tegra_dfll_register(struct platform_device *pdev, + struct tegra_dfll_soc_data *soc); +int tegra_dfll_unregister(struct platform_device *pdev); +int tegra_dfll_runtime_suspend(struct device *dev); +int tegra_dfll_runtime_resume(struct device *dev); + +#endif /* __DRIVERS_CLK_TEGRA_CLK_DFLL_H */ diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c index 59a5714df..48c83efda 100644 --- a/drivers/clk/tegra/clk-divider.c +++ b/drivers/clk/tegra/clk-divider.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "clk.h" diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c index 7649685c8..138a94b99 100644 --- a/drivers/clk/tegra/clk-emc.c +++ b/drivers/clk/tegra/clk-emc.c @@ -103,7 +103,7 @@ static unsigned long emc_recalc_rate(struct clk_hw *hw, * CCF wrongly assumes that the parent won't change during set_rate, * so get the parent rate explicitly.
*/ - parent_rate = __clk_get_rate(__clk_get_parent(hw->clk)); + parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); val = readl(tegra->clk_regs + CLK_SOURCE_EMC); div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK; @@ -116,11 +116,7 @@ static unsigned long emc_recalc_rate(struct clk_hw *hw, * safer since things have EMC rate floors. Also don't touch parent_rate * since we don't want the CCF to play with our parent clocks. */ -static long emc_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_hw) +static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { struct tegra_clk_emc *tegra; u8 ram_code = tegra_read_ram_code(); @@ -135,22 +131,28 @@ static long emc_determine_rate(struct clk_hw *hw, unsigned long rate, timing = tegra->timings + i; - if (timing->rate > max_rate) { + if (timing->rate > req->max_rate) { i = min(i, 1); - return tegra->timings[i - 1].rate; + req->rate = tegra->timings[i - 1].rate; + return 0; } - if (timing->rate < min_rate) + if (timing->rate < req->min_rate) continue; - if (timing->rate >= rate) - return timing->rate; + if (timing->rate >= req->rate) { + req->rate = timing->rate; + return 0; + } } - if (timing) - return timing->rate; + if (timing) { + req->rate = timing->rate; + return 0; + } - return __clk_get_rate(hw->clk); + req->rate = clk_hw_get_rate(hw); + return 0; } static u8 emc_get_parent(struct clk_hw *hw) @@ -312,7 +314,7 @@ static int emc_set_rate(struct clk_hw *hw, unsigned long rate, tegra = container_of(hw, struct tegra_clk_emc, hw); - if (__clk_get_rate(hw->clk) == rate) + if (clk_hw_get_rate(hw) == rate) return 0; /* @@ -525,8 +527,8 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np, if (IS_ERR(clk)) return clk; - tegra->prev_parent = clk_get_parent_by_index( - tegra->hw.clk, emc_get_parent(&tegra->hw)); + tegra->prev_parent = clk_hw_get_parent_by_index( + &tegra->hw, emc_get_parent(&tegra->hw))->clk; tegra->changing_timing = false; /* Allow debugging tools to see the EMC clock */ diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c index 0aa8830ae..d28d6e950 100644 --- a/drivers/clk/tegra/clk-periph-gate.c +++ b/drivers/clk/tegra/clk-periph-gate.c @@ -14,7 +14,6 @@ * along with this program. If not, see . */ -#include #include #include #include diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c index d84ae49d0..ec5b6113b 100644 --- a/drivers/clk/tegra/clk-periph.c +++ b/drivers/clk/tegra/clk-periph.c @@ -14,7 +14,6 @@ * along with this program. If not, see . 
*/ -#include #include #include #include diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c index 3598987a4..257cae0c1 100644 --- a/drivers/clk/tegra/clk-pll-out.c +++ b/drivers/clk/tegra/clk-pll-out.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "clk.h" diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 05c6d08a6..d6d4ecb88 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -18,8 +18,8 @@ #include #include #include -#include #include +#include #include "clk.h" @@ -264,7 +264,7 @@ static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll) } pr_err("%s: Timed out waiting for pll %s lock\n", __func__, - __clk_get_name(pll->hw.clk)); + clk_hw_get_name(&pll->hw)); return -1; } @@ -595,7 +595,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (pll->params->flags & TEGRA_PLL_FIXED) { if (rate != pll->params->fixed_rate) { pr_err("%s: Can not change %s fixed rate %lu to %lu\n", - __func__, __clk_get_name(hw->clk), + __func__, clk_hw_get_name(hw), pll->params->fixed_rate, rate); return -EINVAL; } @@ -605,7 +605,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (_get_table_rate(hw, &cfg, rate, parent_rate) && _calc_rate(hw, &cfg, rate, parent_rate)) { pr_err("%s: Failed to set %s rate %lu\n", __func__, - __clk_get_name(hw->clk), rate); + clk_hw_get_name(hw), rate); WARN_ON(1); return -EINVAL; } @@ -634,7 +634,7 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, /* PLLM is used for memory; we do not change rate */ if (pll->params->flags & TEGRA_PLLM) - return __clk_get_rate(hw->clk); + return clk_hw_get_rate(hw); if (_get_table_rate(hw, &cfg, rate, *prate) && _calc_rate(hw, &cfg, rate, *prate)) @@ -663,7 +663,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, if (_get_table_rate(hw, &sel, pll->params->fixed_rate, parent_rate)) { pr_err("Clock %s has unknown fixed frequency\n", - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); BUG(); } return pll->params->fixed_rate; @@ -1577,7 +1577,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name, if (!pll_params->pdiv_tohw) return ERR_PTR(-EINVAL); - parent_rate = __clk_get_rate(parent); + parent_rate = clk_get_rate(parent); pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate); @@ -1674,7 +1674,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name, return ERR_PTR(-EINVAL); } - parent_rate = __clk_get_rate(parent); + parent_rate = clk_get_rate(parent); pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate); @@ -1715,7 +1715,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name, return ERR_PTR(-EINVAL); } - parent_rate = __clk_get_rate(parent); + parent_rate = clk_get_rate(parent); pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate); @@ -1848,7 +1848,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name, val &= ~PLLSS_REF_SRC_SEL_MASK; pll_writel_base(val, pll); - parent_rate = __clk_get_rate(parent); + parent_rate = clk_get_rate(parent); pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate); diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c index 2fd924d38..131d1b508 100644 --- a/drivers/clk/tegra/clk-super.c +++ b/drivers/clk/tegra/clk-super.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "clk.h" diff --git a/drivers/clk/tegra/clk-tegra-audio.c 
b/drivers/clk/tegra/clk-tegra-audio.c index 5c38aab2c..11e3ad7ad 100644 --- a/drivers/clk/tegra/clk-tegra-audio.c +++ b/drivers/clk/tegra/clk-tegra-audio.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c index 605676d36..da0b5941c 100644 --- a/drivers/clk/tegra/clk-tegra-fixed.c +++ b/drivers/clk/tegra/clk-tegra-fixed.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 46af9244b..cb6ab8309 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c index 08b21c1ee..91377abfe 100644 --- a/drivers/clk/tegra/clk-tegra-pmc.c +++ b/drivers/clk/tegra/clk-tegra-pmc.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c index feb3201c8..5b1d72393 100644 --- a/drivers/clk/tegra/clk-tegra-super-gen4.c +++ b/drivers/clk/tegra/clk-tegra-super-gen4.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include @@ -44,7 +43,9 @@ static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4", static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m", "pll_p", "pll_p_out4", "unused", - "unused", "pll_x" }; + "unused", "pll_x", "unused", "unused", + "unused", "unused", "unused", "unused", + "dfllCPU_out" }; static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m", "pll_p", "pll_p_out4", "unused", diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 8237d16b4..db5871519 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -15,9 +15,7 @@ */ #include -#include #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c new file mode 100644 index 000000000..61253330c --- /dev/null +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c @@ -0,0 +1,166 @@ +/* + * Tegra124 DFLL FCPU clock source driver + * + * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * + * Aleksandr Frid + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include "clk.h" +#include "clk-dfll.h" +#include "cvb.h" + +/* Maximum CPU frequency, indexed by CPU speedo id */ +static const unsigned long cpu_max_freq_table[] = { + [0] = 2014500000UL, + [1] = 2320500000UL, + [2] = 2116500000UL, + [3] = 2524500000UL, +}; + +static const struct cvb_table tegra124_cpu_cvb_tables[] = { + { + .speedo_id = -1, + .process_id = -1, + .min_millivolts = 900, + .max_millivolts = 1260, + .alignment = { + .step_uv = 10000, /* 10mV */ + }, + .speedo_scale = 100, + .voltage_scale = 1000, + .cvb_table = { + {204000000UL, {1112619, -29295, 402} }, + {306000000UL, {1150460, -30585, 402} }, + {408000000UL, {1190122, -31865, 402} }, + {510000000UL, {1231606, -33155, 402} }, + {612000000UL, {1274912, -34435, 402} }, + {714000000UL, {1320040, -35725, 402} }, + {816000000UL, {1366990, -37005, 402} }, + {918000000UL, {1415762, -38295, 402} }, + {1020000000UL, {1466355, -39575, 402} }, + {1122000000UL, {1518771, -40865, 402} }, + {1224000000UL, {1573009, -42145, 402} }, + {1326000000UL, {1629068, -43435, 402} }, + {1428000000UL, {1686950, -44715, 402} }, + {1530000000UL, {1746653, -46005, 402} }, + {1632000000UL, {1808179, -47285, 402} }, + {1734000000UL, {1871526, -48575, 402} }, + {1836000000UL, {1936696, -49855, 402} }, + {1938000000UL, {2003687, -51145, 402} }, + {2014500000UL, {2054787, -52095, 402} }, + {2116500000UL, {2124957, -53385, 402} }, + {2218500000UL, {2196950, -54665, 402} }, + {2320500000UL, {2270765, -55955, 402} }, + {2422500000UL, {2346401, -57235, 402} }, + {2524500000UL, {2437299, -58535, 402} }, + {0, { 0, 0, 0} }, + }, + .cpu_dfll_data = { + .tune0_low = 0x005020ff, + .tune0_high = 0x005040ff, + .tune1 = 0x00000060, + } + }, +}; + +static int tegra124_dfll_fcpu_probe(struct platform_device *pdev) +{ + int process_id, speedo_id, speedo_value; + struct tegra_dfll_soc_data *soc; + const struct cvb_table *cvb; + + process_id = tegra_sku_info.cpu_process_id; + speedo_id = tegra_sku_info.cpu_speedo_id; + speedo_value = tegra_sku_info.cpu_speedo_value; + + if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) { + dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n", + speedo_id); + return -ENODEV; + } + + soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL); + if (!soc) + return -ENOMEM; + + soc->dev = get_cpu_device(0); + if (!soc->dev) { + dev_err(&pdev->dev, "no CPU0 device\n"); + return -ENODEV; + } + + cvb = tegra_cvb_build_opp_table(tegra124_cpu_cvb_tables, + ARRAY_SIZE(tegra124_cpu_cvb_tables), + process_id, speedo_id, speedo_value, + cpu_max_freq_table[speedo_id], + soc->dev); + if (IS_ERR(cvb)) { + dev_err(&pdev->dev, "couldn't build OPP table: %ld\n", + PTR_ERR(cvb)); + return PTR_ERR(cvb); + } + + soc->min_millivolts = cvb->min_millivolts; + soc->tune0_low = cvb->cpu_dfll_data.tune0_low; + soc->tune0_high = cvb->cpu_dfll_data.tune0_high; + soc->tune1 = cvb->cpu_dfll_data.tune1; + + return tegra_dfll_register(pdev, soc); +} + +static const struct of_device_id tegra124_dfll_fcpu_of_match[] = { + { .compatible = "nvidia,tegra124-dfll", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra124_dfll_fcpu_of_match); + +static const struct dev_pm_ops tegra124_dfll_pm_ops = { + SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend, + tegra_dfll_runtime_resume, NULL) +}; + +static struct platform_driver tegra124_dfll_fcpu_driver = { + .probe = tegra124_dfll_fcpu_probe, + .remove = tegra_dfll_unregister, + .driver = { + .name = "tegra124-dfll", + .of_match_table = tegra124_dfll_fcpu_of_match, + 
.pm = &tegra124_dfll_pm_ops, + }, +}; + +static int __init tegra124_dfll_fcpu_init(void) +{ + return platform_driver_register(&tegra124_dfll_fcpu_driver); +} +module_init(tegra124_dfll_fcpu_init); + +static void __exit tegra124_dfll_fcpu_exit(void) +{ + platform_driver_unregister(&tegra124_dfll_fcpu_driver); +} +module_exit(tegra124_dfll_fcpu_exit); + +MODULE_DESCRIPTION("Tegra124 DFLL clock source driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Aleksandr Frid "); +MODULE_AUTHOR("Paul Walmsley "); diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index e8cca3eac..824d75883 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include @@ -24,6 +23,7 @@ #include #include #include +#include #include "clk.h" #include "clk-id.h" @@ -39,6 +39,9 @@ #define CLK_SOURCE_CSITE 0x1d4 #define CLK_SOURCE_EMC 0x19c +#define RST_DFLL_DVCO 0x2f4 +#define DVFS_DFLL_RESET_SHIFT 0 + #define PLLC_BASE 0x80 #define PLLC_OUT 0x84 #define PLLC_MISC2 0x88 @@ -94,6 +97,8 @@ #define PMC_PLLM_WB0_OVERRIDE 0x1dc #define PMC_PLLM_WB0_OVERRIDE_2 0x2b0 +#define CCLKG_BURST_POLICY 0x368 + #define UTMIP_PLL_CFG2 0x488 #define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6) #define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18) @@ -126,6 +131,8 @@ #ifdef CONFIG_PM_SLEEP static struct cpu_clk_suspend_context { u32 clk_csite_src; + u32 cclkg_burst; + u32 cclkg_divider; } tegra124_cpu_clk_sctx; #endif @@ -1319,12 +1326,22 @@ static void tegra124_cpu_clock_suspend(void) tegra124_cpu_clk_sctx.clk_csite_src = readl(clk_base + CLK_SOURCE_CSITE); writel(3 << 30, clk_base + CLK_SOURCE_CSITE); + + tegra124_cpu_clk_sctx.cclkg_burst = + readl(clk_base + CCLKG_BURST_POLICY); + tegra124_cpu_clk_sctx.cclkg_divider = + readl(clk_base + CCLKG_BURST_POLICY + 4); } static void tegra124_cpu_clock_resume(void) { writel(tegra124_cpu_clk_sctx.clk_csite_src, clk_base + CLK_SOURCE_CSITE); + + writel(tegra124_cpu_clk_sctx.cclkg_burst, + clk_base + CCLKG_BURST_POLICY); + writel(tegra124_cpu_clk_sctx.cclkg_divider, + clk_base + CCLKG_BURST_POLICY + 4); } #endif @@ -1414,6 +1431,68 @@ static void __init tegra124_clock_apply_init_table(void) tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX); } +/** + * tegra124_car_barrier - wait for pending writes to the CAR to complete + * + * Wait for any outstanding writes to the CAR MMIO space from this CPU + * to complete before continuing execution. No return value. + */ +static void tegra124_car_barrier(void) +{ + readl_relaxed(clk_base + RST_DFLL_DVCO); +} + +/** + * tegra124_clock_assert_dfll_dvco_reset - assert the DFLL's DVCO reset + * + * Assert the reset line of the DFLL's DVCO. No return value. + */ +static void tegra124_clock_assert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v |= (1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra124_car_barrier(); +} + +/** + * tegra124_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset + * + * Deassert the reset line of the DFLL's DVCO, allowing the DVCO to + * operate. No return value. 
+ */ +static void tegra124_clock_deassert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v &= ~(1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra124_car_barrier(); +} + +static int tegra124_reset_assert(unsigned long id) +{ + if (id == TEGRA124_RST_DFLL_DVCO) + tegra124_clock_assert_dfll_dvco_reset(); + else + return -EINVAL; + + return 0; +} + +static int tegra124_reset_deassert(unsigned long id) +{ + if (id == TEGRA124_RST_DFLL_DVCO) + tegra124_clock_deassert_dfll_dvco_reset(); + else + return -EINVAL; + + return 0; +} + /** * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs * @@ -1499,6 +1578,8 @@ static void __init tegra124_132_clock_init_post(struct device_node *np) { tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks, &pll_x_params); + tegra_init_special_resets(1, tegra124_reset_assert, + tegra124_reset_deassert); tegra_add_of_provider(np); clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np, diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 41272dcc9..bf004f0e4 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 0af3e834d..fad561a58 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index 41cd87c67..2a3a4fe80 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -14,6 +14,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -49,7 +50,6 @@ #define RST_DEVICES_L 0x004 #define RST_DEVICES_H 0x008 #define RST_DEVICES_U 0x00C -#define RST_DFLL_DVCO 0x2F4 #define RST_DEVICES_V 0x358 #define RST_DEVICES_W 0x35C #define RST_DEVICES_X 0x28C @@ -79,6 +79,11 @@ static struct clk **clks; static int clk_num; static struct clk_onecell_data clk_data; +/* Handlers for SoC-specific reset lines */ +static int (*special_reset_assert)(unsigned long); +static int (*special_reset_deassert)(unsigned long); +static unsigned int num_special_reset; + static struct tegra_clk_periph_regs periph_regs[] = { [0] = { .enb_reg = CLK_OUT_ENB_L, @@ -152,19 +157,29 @@ static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev, */ tegra_read_chipid(); - writel_relaxed(BIT(id % 32), - clk_base + periph_regs[id / 32].rst_set_reg); + if (id < periph_banks * 32) { + writel_relaxed(BIT(id % 32), + clk_base + periph_regs[id / 32].rst_set_reg); + return 0; + } else if (id < periph_banks * 32 + num_special_reset) { + return special_reset_assert(id); + } - return 0; + return -EINVAL; } static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev, unsigned long id) { - writel_relaxed(BIT(id % 32), - clk_base + periph_regs[id / 32].rst_clr_reg); + if (id < periph_banks * 32) { + writel_relaxed(BIT(id % 32), + clk_base + periph_regs[id / 32].rst_clr_reg); + return 0; + } else if (id < periph_banks * 32 + num_special_reset) { + return special_reset_deassert(id); + } - return 0; + return -EINVAL; } struct tegra_clk_periph_regs *get_reg_bank(int clkid) @@ -286,10 +301,19 @@ void __init tegra_add_of_provider(struct device_node *np) of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); rst_ctlr.of_node = np; - rst_ctlr.nr_resets = periph_banks * 32; + rst_ctlr.nr_resets = 
periph_banks * 32 + num_special_reset; reset_controller_register(&rst_ctlr); } +void __init tegra_init_special_resets(unsigned int num, + int (*assert)(unsigned long), + int (*deassert)(unsigned long)) +{ + num_special_reset = num; + special_reset_assert = assert; + special_reset_deassert = deassert; +} + void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num) { int i; diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 75ddc8ff8..0621887e0 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -591,6 +591,9 @@ struct tegra_devclk { char *con_id; }; +void tegra_init_special_resets(unsigned int num, int (*assert)(unsigned long), + int (*deassert)(unsigned long)); + void tegra_init_from_table(struct tegra_clk_init_table *tbl, struct clk *clks[], int clk_max); diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c new file mode 100644 index 000000000..0204e0861 --- /dev/null +++ b/drivers/clk/tegra/cvb.c @@ -0,0 +1,140 @@ +/* + * Utility functions for parsing Tegra CVB voltage tables + * + * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include +#include +#include + +#include "cvb.h" + +/* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) */ +static inline int get_cvb_voltage(int speedo, int s_scale, + const struct cvb_coefficients *cvb) +{ + int mv; + + /* apply only speedo scale: output mv = cvb_mv * v_scale */ + mv = DIV_ROUND_CLOSEST(cvb->c2 * speedo, s_scale); + mv = DIV_ROUND_CLOSEST((mv + cvb->c1) * speedo, s_scale) + cvb->c0; + return mv; +} + +static int round_cvb_voltage(int mv, int v_scale, + const struct rail_alignment *align) +{ + /* combined: apply voltage scale and round to cvb alignment step */ + int uv; + int step = (align->step_uv ? : 1000) * v_scale; + int offset = align->offset_uv * v_scale; + + uv = max(mv * 1000, offset) - offset; + uv = DIV_ROUND_UP(uv, step) * align->step_uv + align->offset_uv; + return uv / 1000; +} + +enum { + DOWN, + UP +}; + +static int round_voltage(int mv, const struct rail_alignment *align, int up) +{ + if (align->step_uv) { + int uv; + + uv = max(mv * 1000, align->offset_uv) - align->offset_uv; + uv = (uv + (up ? 
align->step_uv - 1 : 0)) / align->step_uv; + return (uv * align->step_uv + align->offset_uv) / 1000; + } + return mv; +} + +static int build_opp_table(const struct cvb_table *d, + int speedo_value, + unsigned long max_freq, + struct device *opp_dev) +{ + int i, ret, dfll_mv, min_mv, max_mv; + const struct cvb_table_freq_entry *table = NULL; + const struct rail_alignment *align = &d->alignment; + + min_mv = round_voltage(d->min_millivolts, align, UP); + max_mv = round_voltage(d->max_millivolts, align, DOWN); + + for (i = 0; i < MAX_DVFS_FREQS; i++) { + table = &d->cvb_table[i]; + if (!table->freq || (table->freq > max_freq)) + break; + + /* + * FIXME after clk_round_rate/clk_determine_rate prototypes + * have been updated + */ + if (table->freq & (1<<31)) + continue; + + dfll_mv = get_cvb_voltage( + speedo_value, d->speedo_scale, &table->coefficients); + dfll_mv = round_cvb_voltage(dfll_mv, d->voltage_scale, align); + dfll_mv = clamp(dfll_mv, min_mv, max_mv); + + ret = dev_pm_opp_add(opp_dev, table->freq, dfll_mv * 1000); + if (ret) + return ret; + } + + return 0; +} + +/** + * tegra_cvb_build_opp_table - build OPP table from Tegra CVB tables + * @cvb_tables: array of CVB tables + * @sz: size of the previously mentioned array + * @process_id: process id of the HW module + * @speedo_id: speedo id of the HW module + * @speedo_value: speedo value of the HW module + * @max_rate: highest safe clock rate + * @opp_dev: the struct device * for which the OPP table is built + * + * On Tegra, a CVB table encodes the relationship between operating voltage + * and safe maximal frequency for a given module (e.g. GPU or CPU). This + * function calculates the optimal voltage-frequency operating points + * for the given arguments and exports them via the OPP library for the + * given @opp_dev. Returns a pointer to the struct cvb_table that matched + * or an ERR_PTR on failure. + */ +const struct cvb_table *tegra_cvb_build_opp_table( + const struct cvb_table *cvb_tables, + size_t sz, int process_id, + int speedo_id, int speedo_value, + unsigned long max_rate, + struct device *opp_dev) +{ + int i, ret; + + for (i = 0; i < sz; i++) { + const struct cvb_table *d = &cvb_tables[i]; + + if (d->speedo_id != -1 && d->speedo_id != speedo_id) + continue; + if (d->process_id != -1 && d->process_id != process_id) + continue; + + ret = build_opp_table(d, speedo_value, max_rate, opp_dev); + return ret ? ERR_PTR(ret) : d; + } + + return ERR_PTR(-EINVAL); +} diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h new file mode 100644 index 000000000..f62cdc4f4 --- /dev/null +++ b/drivers/clk/tegra/cvb.h @@ -0,0 +1,67 @@ +/* + * Utility functions for parsing Tegra CVB voltage tables + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
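+ *
+ * Worked example of the evaluation done in cvb.c (the speedo value
+ * 1555 is hypothetical; the table entry is the 204 MHz Tegra124 one
+ * with coefficients {1112619, -29295, 402}, speedo_scale = 100,
+ * voltage_scale = 1000, 10 mV alignment, min/max 900/1260 mV):
+ *
+ *   mv = DIV_ROUND_CLOSEST(402 * 1555, 100)            = 6251
+ *   mv = DIV_ROUND_CLOSEST((6251 - 29295) * 1555, 100)
+ *        + 1112619                                     = 754285
+ *   rounded up to the 10 mV alignment step             -> 760 mV
+ *   clamped to [900, 1260] mV                          -> 900 mV
+ *
+ * so dev_pm_opp_add() registers that OPP as 204 MHz at 900000 uV.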
+ * + */ + +#ifndef __DRIVERS_CLK_TEGRA_CVB_H +#define __DRIVERS_CLK_TEGRA_CVB_H + +#include + +struct device; + +#define MAX_DVFS_FREQS 40 + +struct rail_alignment { + int offset_uv; + int step_uv; +}; + +struct cvb_coefficients { + int c0; + int c1; + int c2; +}; + +struct cvb_table_freq_entry { + unsigned long freq; + struct cvb_coefficients coefficients; +}; + +struct cvb_cpu_dfll_data { + u32 tune0_low; + u32 tune0_high; + u32 tune1; +}; + +struct cvb_table { + int speedo_id; + int process_id; + + int min_millivolts; + int max_millivolts; + struct rail_alignment alignment; + + int speedo_scale; + int voltage_scale; + struct cvb_table_freq_entry cvb_table[MAX_DVFS_FREQS]; + struct cvb_cpu_dfll_data cpu_dfll_data; +}; + +const struct cvb_table *tegra_cvb_build_opp_table( + const struct cvb_table *cvb_tables, + size_t sz, int process_id, + int speedo_id, int speedo_value, + unsigned long max_rate, + struct device *opp_dev); + +#endif diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index 105ffd0f5..d4ac96087 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -1,16 +1,19 @@ obj-y += clk.o autoidle.o clockdomain.o clk-common = dpll.o composite.o divider.o gate.o \ - fixed-factor.o mux.o apll.o -obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o -obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-816x.o + fixed-factor.o mux.o apll.o \ + clkt_dpll.o clkt_iclk.o clkt_dflt.o +obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o +obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \ - clk-3xxx.o -obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o -obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o + clk-3xxx.o dpll3xxx.o +obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o \ + dpll3xxx.o dpll44xx.o +obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o \ + dpll3xxx.o dpll44xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \ - clk-dra7-atl.o -obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o + clk-dra7-atl.o dpll3xxx.o dpll44xx.o +obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o ifdef CONFIG_ATAGS obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 49baf3831..f3eab6e79 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -15,6 +15,7 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include @@ -27,6 +28,8 @@ #include #include +#include "clock.h" + #define APLL_FORCE_LOCK 0x1 #define APLL_AUTO_IDLE 0x2 #define MAX_APLL_WAIT_TRIES 1000000 @@ -47,7 +50,7 @@ static int dra7_apll_enable(struct clk_hw *hw) if (!ad) return -EINVAL; - clk_name = __clk_get_name(clk->hw.clk); + clk_name = clk_hw_get_name(&clk->hw); state <<= __ffs(ad->idlest_mask); @@ -170,7 +173,6 @@ static void __init of_dra7_apll_setup(struct device_node *node) struct clk_hw_omap *clk_hw = NULL; struct clk_init_data *init = NULL; const char **parent_names = NULL; - int i; ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); @@ -195,8 +197,7 @@ static void __init of_dra7_apll_setup(struct device_node *node) if (!parent_names) goto cleanup; - for (i = 0; i < init->num_parents; i++) - parent_names[i] = of_clk_get_parent_name(node, i); + of_clk_parent_fill(node, parent_names, init->num_parents); init->parent_names = parent_names; @@ -272,7 +273,7 @@ static int omap2_apll_enable(struct clk_hw *hw) if (i == MAX_APLL_WAIT_TRIES) { pr_warn("%s failed to transition to locked\n", - __clk_get_name(clk->hw.clk)); + clk_hw_get_name(&clk->hw)); return -EBUSY; } diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c index e75c64c9e..345af4346 100644 --- a/drivers/clk/ti/autoidle.c +++ b/drivers/clk/ti/autoidle.c @@ -22,6 +22,8 @@ #include #include +#include "clock.h" + struct clk_ti_autoidle { void __iomem *reg; u8 shift; @@ -33,8 +35,41 @@ struct clk_ti_autoidle { #define AUTOIDLE_LOW 0x1 static LIST_HEAD(autoidle_clks); +static LIST_HEAD(clk_hw_omap_clocks); + +/** + * omap2_clk_deny_idle - disable autoidle on an OMAP clock + * @clk: struct clk * to disable autoidle for + * + * Disable autoidle on an OMAP clock. + */ +int omap2_clk_deny_idle(struct clk *clk) +{ + struct clk_hw_omap *c; -static void ti_allow_autoidle(struct clk_ti_autoidle *clk) + c = to_clk_hw_omap(__clk_get_hw(clk)); + if (c->ops && c->ops->deny_idle) + c->ops->deny_idle(c); + return 0; +} + +/** + * omap2_clk_allow_idle - enable autoidle on an OMAP clock + * @clk: struct clk * to enable autoidle for + * + * Enable autoidle on an OMAP clock. + */ +int omap2_clk_allow_idle(struct clk *clk) +{ + struct clk_hw_omap *c; + + c = to_clk_hw_omap(__clk_get_hw(clk)); + if (c->ops && c->ops->allow_idle) + c->ops->allow_idle(c); + return 0; +} + +static void _allow_autoidle(struct clk_ti_autoidle *clk) { u32 val; @@ -48,7 +83,7 @@ static void ti_allow_autoidle(struct clk_ti_autoidle *clk) ti_clk_ll_ops->clk_writel(val, clk->reg); } -static void ti_deny_autoidle(struct clk_ti_autoidle *clk) +static void _deny_autoidle(struct clk_ti_autoidle *clk) { u32 val; @@ -63,31 +98,31 @@ static void ti_deny_autoidle(struct clk_ti_autoidle *clk) } /** - * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks + * _clk_generic_allow_autoidle_all - enable autoidle for all clocks * * Enables hardware autoidle for all registered DT clocks, which have * the feature. */ -void of_ti_clk_allow_autoidle_all(void) +static void _clk_generic_allow_autoidle_all(void) { struct clk_ti_autoidle *c; list_for_each_entry(c, &autoidle_clks, node) - ti_allow_autoidle(c); + _allow_autoidle(c); } /** - * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks + * _clk_generic_deny_autoidle_all - disable autoidle for all clocks * * Disables hardware autoidle for all registered DT clocks, which have * the feature. 
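+ *
+ * Individual drivers are expected to use the per-clock wrappers
+ * added above instead, e.g. (sketch):
+ *
+ *   omap2_clk_deny_idle(ick);
+ *   ... program the module while its interface clock stays active ...
+ *   omap2_clk_allow_idle(ick);
+ *
+ * while the *_all() variants here are intended for SoC power
+ * management code.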
*/ -void of_ti_clk_deny_autoidle_all(void) +static void _clk_generic_deny_autoidle_all(void) { struct clk_ti_autoidle *c; list_for_each_entry(c, &autoidle_clks, node) - ti_deny_autoidle(c); + _deny_autoidle(c); } /** @@ -131,3 +166,67 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) return 0; } + +/** + * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock + * @hw: struct clk_hw * to initialize + * + * Add an OMAP clock @hw to the internal list of OMAP clocks. Used + * temporarily for autoidle handling, until this support can be + * integrated into the common clock framework code in some way. No + * return value. + */ +void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw) +{ + struct clk_hw_omap *c; + + if (clk_hw_get_flags(hw) & CLK_IS_BASIC) + return; + + c = to_clk_hw_omap(hw); + list_add(&c->node, &clk_hw_omap_clocks); +} + +/** + * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that + * support it + * + * Enable clock autoidle on all OMAP clocks that have allow_idle + * function pointers associated with them. This function is intended + * to be temporary until support for this is added to the common clock + * code. Returns 0. + */ +int omap2_clk_enable_autoidle_all(void) +{ + struct clk_hw_omap *c; + + list_for_each_entry(c, &clk_hw_omap_clocks, node) + if (c->ops && c->ops->allow_idle) + c->ops->allow_idle(c); + + _clk_generic_allow_autoidle_all(); + + return 0; +} + +/** + * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that + * support it + * + * Disable clock autoidle on all OMAP clocks that have deny_idle + * function pointers associated with them. This function is intended + * to be temporary until support for this is added to the common clock + * code. Returns 0. + */ +int omap2_clk_disable_autoidle_all(void) +{ + struct clk_hw_omap *c; + + list_for_each_entry(c, &clk_hw_omap_clocks, node) + if (c->ops && c->ops->deny_idle) + c->ops->deny_idle(c); + + _clk_generic_deny_autoidle_all(); + + return 0; +} diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c index c808ab3d2..657c4fe07 100644 --- a/drivers/clk/ti/clk-2xxx.c +++ b/drivers/clk/ti/clk-2xxx.c @@ -16,9 +16,11 @@ #include #include -#include +#include #include +#include "clock.h" + static struct ti_dt_clk omap2xxx_clks[] = { DT_CLK(NULL, "func_32k_ck", "func_32k_ck"), DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"), diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index 028b33783..ef2ec64fe 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -16,9 +16,12 @@ #include #include +#include #include #include +#include "clock.h" + static struct ti_dt_clk am33xx_clks[] = { DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"), DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"), diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c index 0b61548d5..0fbf8a917 100644 --- a/drivers/clk/ti/clk-3xxx-legacy.c +++ b/drivers/clk/ti/clk-3xxx-legacy.c @@ -15,6 +15,7 @@ */ #include +#include #include #include diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 4ab28cfb8..8831e1a05 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -16,9 +16,220 @@ #include #include +#include #include #include +#include "clock.h" + +/* + * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks + * that are sourced by DPLL5, and both of these require this clock + * to be at 120 MHz for proper operation.
+ */ +#define DPLL5_FREQ_FOR_USBHOST 120000000 + +#define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1 +#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5 +#define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8 + +#define OMAP34XX_CM_IDLEST_VAL 1 + +/* + * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported + * in the same register at a bit offset of 0x8. The EN_ACK for ICK is + * at an offset of 4 from ICK enable bit. + */ +#define AM35XX_IPSS_ICK_MASK 0xF +#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4 +#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8 +#define AM35XX_IPSS_CLK_IDLEST_VAL 0 + +#define AM35XX_ST_IPSS_SHIFT 5 + +/** + * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift + * from the CM_{I,F}CLKEN bit. Pass back the correct info via + * @idlest_reg and @idlest_bit. No return value. + */ +static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); + *idlest_reg = (__force void __iomem *)r; + *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; + *idlest_val = OMAP34XX_CM_IDLEST_VAL; +} + +const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, + .find_idlest = omap3430es2_clk_ssi_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; + +/** + * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * Some OMAP modules on OMAP3 ES2+ chips have both initiator and + * target IDLEST bits. For our purposes, we are concerned with the + * target IDLEST bits, which exist at a different bit position than + * the *CLKEN bit position for these modules (DSS and USBHOST) (The + * default find_idlest code assumes that they are at the same + * position.) No return value. 
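The `(enable_reg & ~0xf0) | 0x20` expression used by these find_idlest helpers rebases a CM_{F,I}CLKEN register address onto the CM_IDLEST register of the same CM submodule: the CLKEN registers sit in the 0x00-0xf0 offset window (CM_FCLKEN at 0x00, CM_ICLKEN at 0x10) and IDLEST at 0x20, so clearing bits 4-7 and setting 0x20 lands on IDLEST regardless of which enable register you start from. A worked example; the addresses are the OMAP3 CORE_CM ones, shown purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int cm_fclken = 0x48004a00;	/* CM_FCLKEN1_CORE */
	unsigned int cm_iclken = 0x48004a10;	/* CM_ICLKEN1_CORE */

	/* same transform as in the find_idlest helpers above */
	printf("0x%08x\n", (cm_fclken & ~0xf0u) | 0x20);	/* 0x48004a20 */
	printf("0x%08x\n", (cm_iclken & ~0xf0u) | 0x20);	/* 0x48004a20 */
	return 0;
}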
+ */ +static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); + *idlest_reg = (__force void __iomem *)r; + /* USBHOST_IDLE has same shift */ + *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; + *idlest_val = OMAP34XX_CM_IDLEST_VAL; +} + +const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = { + .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; + +const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, + .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; + +/** + * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different + * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via + * @idlest_reg and @idlest_bit. No return value. + */ +static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); + *idlest_reg = (__force void __iomem *)r; + *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; + *idlest_val = OMAP34XX_CM_IDLEST_VAL; +} + +const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, + .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; + +/** + * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * The interface clocks on AM35xx IPSS reflect the clock idle status + * in the enable register itself at a bit offset of 4 from the enable + * bit. A value of 1 indicates that clock is enabled. + */ +static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + *idlest_reg = (__force void __iomem *)(clk->enable_reg); + *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET; + *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL; +} + +/** + * am35xx_clk_find_companion - find companion clock to @clk + * @clk: struct clk * to find the companion clock of + * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in + * @other_bit: u8 ** to return the companion clock bit shift in + * + * Some clocks don't have companion clocks. For example, modules with + * only an interface clock (such as HECC) don't have a companion + * clock. Right now, this code relies on the hardware exporting a bit + * in the correct companion register that indicates that the + * nonexistent 'companion clock' is active. Future patches will + * associate this type of code with per-module data structures to + * avoid this issue, and remove the casts. No return value.
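To make the AM35xx IPSS bit layout concrete: a single register carries the ICK enable bit, its EN_ACK status four bits above it, and the matching FCK enable eight bits above it. A small sketch of the offset arithmetic used by the two AM35xx helpers; the example bit number is hypothetical:

#include <stdio.h>

#define AM35XX_IPSS_ICK_MASK		0xF
#define AM35XX_IPSS_ICK_EN_ACK_OFFSET	0x4
#define AM35XX_IPSS_ICK_FCK_OFFSET	0x8

int main(void)
{
	unsigned int ick_bit = 1;	/* hypothetical ICK enable bit */

	/* idle/ready acknowledge bit, as am35xx_clk_find_idlest() computes */
	printf("EN_ACK bit: %u\n", ick_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET);

	/* companion lookup runs in both directions, keyed off ICK_MASK */
	if (ick_bit & AM35XX_IPSS_ICK_MASK)
		printf("companion (FCK) bit: %u\n",
		       ick_bit + AM35XX_IPSS_ICK_FCK_OFFSET);
	return 0;
}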
+ */ +static void am35xx_clk_find_companion(struct clk_hw_omap *clk, + void __iomem **other_reg, + u8 *other_bit) +{ + *other_reg = (__force void __iomem *)(clk->enable_reg); + if (clk->enable_bit & AM35XX_IPSS_ICK_MASK) + *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET; + else + *other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET; +} + +const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = { + .find_idlest = am35xx_clk_find_idlest, + .find_companion = am35xx_clk_find_companion, +}; + +/** + * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * The IPSS target CM_IDLEST bit is at a different shift from the + * CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg + * and @idlest_bit. No return value. + */ +static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); + *idlest_reg = (__force void __iomem *)r; + *idlest_bit = AM35XX_ST_IPSS_SHIFT; + *idlest_val = OMAP34XX_CM_IDLEST_VAL; +} + +const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, + .find_idlest = am35xx_clk_ipss_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; static struct ti_dt_clk omap3xxx_clks[] = { DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"), @@ -324,6 +535,30 @@ enum { OMAP3_SOC_OMAP3630, }; +/** + * omap3_clk_lock_dpll5 - locks DPLL5 + * + * Locks DPLL5 to a pre-defined frequency. This is required for proper + * operation of USB. 
+ */ +void __init omap3_clk_lock_dpll5(void) +{ + struct clk *dpll5_clk; + struct clk *dpll5_m2_clk; + + dpll5_clk = clk_get(NULL, "dpll5_ck"); + clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST); + clk_prepare_enable(dpll5_clk); + + /* Program dpll5_m2_clk divider for no division */ + dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck"); + clk_prepare_enable(dpll5_m2_clk); + clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST); + + clk_disable_unprepare(dpll5_m2_clk); + clk_disable_unprepare(dpll5_clk); +} + static int __init omap3xxx_dt_clk_init(int soc_type) { if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 || diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 3795fce8a..097fc90bf 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -16,9 +16,12 @@ #include #include +#include #include #include +#include "clock.h" + static struct ti_dt_clk am43xx_clks[] = { DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"), DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"), @@ -71,6 +74,7 @@ static struct ti_dt_clk am43xx_clks[] = { DT_CLK(NULL, "clk_24mhz", "clk_24mhz"), DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"), DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"), + DT_CLK(NULL, "dpll_clksel_mac_clk", "dpll_clksel_mac_clk"), DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"), DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"), DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"), diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 581db7711..7a8b51b35 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -16,6 +16,8 @@ #include #include +#include "clock.h" + /* * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 96c69a335..59ce2fa2c 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -17,6 +17,8 @@ #include #include +#include "clock.h" + #define OMAP5_DPLL_ABE_DEFFREQ 98304000 /* diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 0eb82107c..a911d7de3 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -16,10 +16,11 @@ #include #include +#include "clock.h" + #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 #define DRA7_DPLL_USB_DEFFREQ 960000000 - static struct ti_dt_clk dra7xx_clks[] = { DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"), DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"), diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c new file mode 100644 index 000000000..e17292079 --- /dev/null +++ b/drivers/clk/ti/clk-814x.c @@ -0,0 +1,33 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
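omap3_clk_lock_dpll5() above keeps the early-boot sequence short and does not check the clk API return values. For comparison, an illustrative consumer-style variant with error handling might look like the sketch below; this is not part of the patch, and the function name is hypothetical.

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative only: same DPLL5 programming as above, but checking the
 * clk API return values instead of assuming the lookups succeed. */
static int example_lock_dpll5(void)
{
	struct clk *dpll5, *m2;
	int ret;

	dpll5 = clk_get(NULL, "dpll5_ck");
	if (IS_ERR(dpll5))
		return PTR_ERR(dpll5);

	ret = clk_set_rate(dpll5, 120000000);
	if (!ret)
		ret = clk_prepare_enable(dpll5);
	if (ret)
		goto out;

	m2 = clk_get(NULL, "dpll5_m2_ck");
	if (IS_ERR(m2)) {
		ret = PTR_ERR(m2);
		clk_disable_unprepare(dpll5);
		goto out;
	}

	ret = clk_set_rate(m2, 120000000);	/* M2 divider = 1 */

	clk_put(m2);
	clk_disable_unprepare(dpll5);
out:
	clk_put(dpll5);
	return ret;
}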
+ */ + +#include +#include +#include + +#include "clock.h" + +static struct ti_dt_clk dm814_clks[] = { + DT_CLK(NULL, "devosc_ck", "devosc_ck"), + DT_CLK(NULL, "mpu_ck", "mpu_ck"), + DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"), + DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"), + DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"), + DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"), + DT_CLK(NULL, "timer_sys_ck", "devosc_ck"), + DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"), + DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"), + { .node_name = NULL }, +}; + +int __init dm814x_dt_clk_init(void) +{ + ti_dt_clocks_register(dm814_clks); + omap2_clk_disable_autoidle_all(); + omap2_clk_enable_init_clocks(NULL, 0); + + return 0; +} diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c index 9451e651a..1dfad0c71 100644 --- a/drivers/clk/ti/clk-816x.c +++ b/drivers/clk/ti/clk-816x.c @@ -14,6 +14,8 @@ #include #include +#include "clock.h" + static struct ti_dt_clk dm816x_clks[] = { DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"), DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), @@ -42,7 +44,7 @@ static const char *enable_init_clks[] = { "ddr_pll_clk3", }; -int __init ti81xx_dt_clk_init(void) +int __init dm816x_dt_clk_init(void) { ti_dt_clocks_register(dm816x_clks); omap2_clk_disable_autoidle_all(); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 19e543a32..2e14dfb58 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -16,6 +16,7 @@ */ #include +#include #include #include #include diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 64bb5e8a3..b5bcd77e8 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -15,12 +15,15 @@ * GNU General Public License for more details. */ +#include #include #include #include #include #include #include #include +#include +#include #include "clock.h" @@ -30,6 +33,63 @@ struct ti_clk_ll_ops *ti_clk_ll_ops; static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; +static struct ti_clk_features ti_clk_features; + +struct clk_iomap { + struct regmap *regmap; + void __iomem *mem; +}; + +static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS]; + +static void clk_memmap_writel(u32 val, void __iomem *reg) +{ + struct clk_omap_reg *r = (struct clk_omap_reg *)&reg; + struct clk_iomap *io = clk_memmaps[r->index]; + + if (io->regmap) + regmap_write(io->regmap, r->offset, val); + else + writel_relaxed(val, io->mem + r->offset); +} + +static u32 clk_memmap_readl(void __iomem *reg) +{ + u32 val; + struct clk_omap_reg *r = (struct clk_omap_reg *)&reg; + struct clk_iomap *io = clk_memmaps[r->index]; + + if (io->regmap) + regmap_read(io->regmap, r->offset, &val); + else + val = readl_relaxed(io->mem + r->offset); + + return val; +} + +/** + * ti_clk_setup_ll_ops - setup low level clock operations + * @ops: low level clock ops descriptor + * + * Sets up low level clock operations for TI clock driver. This is used + * to provide various callbacks for the clock driver towards platform + * specific code. Returns 0 on success, -EBUSY if ll_ops have been + * registered already.
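The accessors above rely on ti_clk_get_reg_addr() packing a memmap index plus a register offset into a pointer-sized struct clk_omap_reg cookie; clk_memmap_readl()/clk_memmap_writel() reinterpret the pointer's bits to pick the regmap or plain MMIO backend. A standalone sketch of that pack/unpack trick follows; the field widths and packing layout here are assumptions for illustration (the driver reinterprets the pointer bits as a struct directly).

#include <stdio.h>
#include <stdint.h>

/* mirrors the idea of struct clk_omap_reg: a memmap index plus a
 * register offset, smuggled through a pointer-sized value */
struct reg_cookie {
	uint16_t index;
	uint16_t offset;
};

static void *pack(uint16_t index, uint16_t offset)
{
	uint32_t v = ((uint32_t)index << 16) | offset;

	return (void *)(uintptr_t)v;
}

static struct reg_cookie unpack(void *cookie)
{
	uint32_t v = (uint32_t)(uintptr_t)cookie;
	struct reg_cookie r = { v >> 16, v & 0xffff };

	return r;
}

int main(void)
{
	void *reg = pack(2, 0x0a20);
	struct reg_cookie r = unpack(reg);

	/* a writel-style accessor would pick regmap vs. MMIO per index */
	printf("memmap %u, offset 0x%04x\n", r.index, r.offset);
	return 0;
}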
+ */ +int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) +{ + if (ti_clk_ll_ops) { + pr_err("Attempt to register ll_ops multiple times.\n"); + return -EBUSY; + } + + ti_clk_ll_ops = ops; + ops->clk_readl = clk_memmap_readl; + ops->clk_writel = clk_memmap_writel; + + return 0; +} + /** * ti_dt_clocks_register - register DT alias clocks during boot * @oclks: list of clocks to register @@ -134,32 +194,67 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) reg->offset = val; - return (void __iomem *)tmp; + return (__force void __iomem *)tmp; } /** - * ti_dt_clk_init_provider - init master clock provider + * omap2_clk_provider_init - init master clock provider * @parent: master node * @index: internal index for clk_reg_ops + * @syscon: syscon regmap pointer for accessing clock registers + * @mem: iomem pointer for the clock provider memory area, only used if + * syscon is not provided * * Initializes a master clock IP block. This basically sets up the * mapping from clocks node to the memory map index. All the clocks * are then initialized through the common of_clk_init call, and the * clocks will access their memory maps based on the node layout. + * Returns 0 on success. */ -void ti_dt_clk_init_provider(struct device_node *parent, int index) +int __init omap2_clk_provider_init(struct device_node *parent, int index, + struct regmap *syscon, void __iomem *mem) { struct device_node *clocks; + struct clk_iomap *io; /* get clocks for this parent */ clocks = of_get_child_by_name(parent, "clocks"); if (!clocks) { pr_err("%s missing 'clocks' child node.\n", parent->name); - return; + return -EINVAL; } /* add clocks node info */ clocks_node_ptr[index] = clocks; + + io = kzalloc(sizeof(*io), GFP_KERNEL); + if (!io) + return -ENOMEM; + + io->regmap = syscon; + io->mem = mem; + + clk_memmaps[index] = io; + + return 0; +} + +/** + * omap2_clk_legacy_provider_init - initialize a legacy clock provider + * @index: index for the clock provider + * @mem: iomem pointer for the clock provider memory area + * + * Initializes a legacy clock provider memory mapping. + */ +void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) +{ + struct clk_iomap *io; + + io = memblock_virt_alloc(sizeof(*io), 0); + + io->mem = mem; + + clk_memmaps[index] = io; } /** @@ -244,11 +339,11 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) if (!IS_ERR(clk)) { setup->clk = clk; if (setup->clkdm_name) { - if (__clk_get_flags(clk) & CLK_IS_BASIC) { + clk_hw = __clk_get_hw(clk); + if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { pr_warn("can't setup clkdm for basic clk %s\n", setup->name); } else { - clk_hw = __clk_get_hw(clk); to_clk_hw_omap(clk_hw)->clkdm_name = setup->clkdm_name; omap2_init_clk_clkdm(clk_hw); @@ -311,3 +406,50 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) return 0; } #endif + +/** + * ti_clk_setup_features - setup clock features flags + * @features: features definition to use + * + * Initializes the clock driver features flags based on platform + * provided data. No return value. + */ +void __init ti_clk_setup_features(struct ti_clk_features *features) +{ + memcpy(&ti_clk_features, features, sizeof(*features)); +} + +/** + * ti_clk_get_features - get clock driver features flags + * + * Get TI clock driver features description. Returns a pointer + * to the current feature setup.
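A platform hands its clockdomain hooks to the driver once via ti_clk_setup_ll_ops(), after which the register accessors are overwritten with the memmap-aware versions above. A rough sketch of the registration side; it assumes mach-omap2's clkdm_clk_enable()/clkdm_clk_disable() helpers and the linux/clk/ti.h header, and only the hooks that appear elsewhere in this patch are known fields.

#include <linux/clk/ti.h>	/* struct ti_clk_ll_ops, ti_clk_setup_ll_ops() */

/* Illustrative platform glue; fields beyond the hooks shown in this
 * patch are omitted on purpose. */
static struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clkdm_clk_enable  = clkdm_clk_enable,	/* mach-omap2 helpers, assumed */
	.clkdm_clk_disable = clkdm_clk_disable,
	/* .clk_readl/.clk_writel get filled in by ti_clk_setup_ll_ops() */
};

static int __init omap_clk_low_level_init(void)
{
	/* a second registration would fail with -EBUSY */
	return ti_clk_setup_ll_ops(&omap_clk_ll_ops);
}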
+ */ +const struct ti_clk_features *ti_clk_get_features(void) +{ + return &ti_clk_features; +} + +/** + * omap2_clk_enable_init_clocks - prepare & enable a list of clocks + * @clk_names: ptr to an array of strings of clock names to enable + * @num_clocks: number of clock names in @clk_names + * + * Prepare and enable a list of clocks, named by @clk_names. No + * return value. XXX Deprecated; only needed until these clocks are + * properly claimed and enabled by the drivers or core code that uses + * them. XXX What code disables & calls clk_put on these clocks? + */ +void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) +{ + struct clk *init_clk; + int i; + + for (i = 0; i < num_clocks; i++) { + init_clk = clk_get(NULL, clk_names[i]); + if (WARN(IS_ERR(init_clk), "could not find init clock %s\n", + clk_names[i])) + continue; + clk_prepare_enable(init_clk); + } +} diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c new file mode 100644 index 000000000..1ddc288fc --- /dev/null +++ b/drivers/clk/ti/clkt_dflt.c @@ -0,0 +1,316 @@ +/* + * Default clock type + * + * Copyright (C) 2005-2008, 2015 Texas Instruments, Inc. + * Copyright (C) 2004-2010 Nokia Corporation + * + * Contacts: + * Richard Woodruff + * Paul Walmsley + * Tero Kristo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include "clock.h" + +/* + * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait + * for a module to indicate that it is no longer in idle + */ +#define MAX_MODULE_ENABLE_WAIT 100000 + +/* + * CM module register offsets, used for calculating the companion + * register addresses. + */ +#define CM_FCLKEN 0x0000 +#define CM_ICLKEN 0x0010 + +/** + * _wait_idlest_generic - wait for a module to leave the idle state + * @clk: module clock to wait for (needed for register offsets) + * @reg: virtual address of module IDLEST register + * @mask: value to mask against to determine if the module is active + * @idlest: idle state indicator (0 or 1) for the clock + * @name: name of the clock (for printk) + * + * Wait for a module to leave idle, where its idle-status register is + * not inside the CM module. Returns 1 if the module left idle + * promptly, or 0 if the module did not leave idle before the timeout + * elapsed. XXX Deprecated - should be moved into drivers for the + * individual IP block that the IDLEST register exists in. + */ +static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, + u32 mask, u8 idlest, const char *name) +{ + int i = 0, ena = 0; + + ena = (idlest) ? 0 : mask; + + /* Wait until module enters enabled state */ + for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) { + if ((ti_clk_ll_ops->clk_readl(reg) & mask) == ena) + break; + udelay(1); + } + + if (i < MAX_MODULE_ENABLE_WAIT) + pr_debug("omap clock: module associated with clock %s ready after %d loops\n", + name, i); + else + pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n", + name, MAX_MODULE_ENABLE_WAIT); + + return (i < MAX_MODULE_ENABLE_WAIT) ?
1 : 0; +} + +/** + * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE + * @clk: struct clk * belonging to the module + * + * If the necessary clocks for the OMAP hardware IP block that + * corresponds to clock @clk are enabled, then wait for the module to + * indicate readiness (i.e., to leave IDLE). This code does not + * belong in the clock code and will be moved in the medium term to + * module-dependent code. No return value. + */ +static void _omap2_module_wait_ready(struct clk_hw_omap *clk) +{ + void __iomem *companion_reg, *idlest_reg; + u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; + s16 prcm_mod; + int r; + + /* Not all modules have multiple clocks that their IDLEST depends on */ + if (clk->ops->find_companion) { + clk->ops->find_companion(clk, &companion_reg, &other_bit); + if (!(ti_clk_ll_ops->clk_readl(companion_reg) & + (1 << other_bit))) + return; + } + + clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); + r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod, + &idlest_reg_id); + if (r) { + /* IDLEST register not in the CM module */ + _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit), + idlest_val, clk_hw_get_name(&clk->hw)); + } else { + ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id, + idlest_bit); + } +} + +/** + * omap2_clk_dflt_find_companion - find companion clock to @clk + * @clk: struct clk * to find the companion clock of + * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in + * @other_bit: u8 ** to return the companion clock bit shift in + * + * Note: We don't need special code here for INVERT_ENABLE for the + * time being since INVERT_ENABLE only applies to clocks enabled by + * CM_CLKEN_PLL + * + * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's + * just a matter of XORing the bits. + * + * Some clocks don't have companion clocks. For example, modules with + * only an interface clock (such as MAILBOXES) don't have a companion + * clock. Right now, this code relies on the hardware exporting a bit + * in the correct companion register that indicates that the + * nonexistent 'companion clock' is active. Future patches will + * associate this type of code with per-module data structures to + * avoid this issue, and remove the casts. No return value. + */ +void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, + void __iomem **other_reg, u8 *other_bit) +{ + u32 r; + + /* + * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes + * it's just a matter of XORing the bits. + */ + r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)); + + *other_reg = (__force void __iomem *)r; + *other_bit = clk->enable_bit; +} + +/** + * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk + * @clk: struct clk * to find IDLEST info for + * @idlest_reg: void __iomem ** to return the CM_IDLEST va in + * @idlest_bit: u8 * to return the CM_IDLEST bit shift in + * @idlest_val: u8 * to return the idle status indicator + * + * Return the CM_IDLEST register address and bit shift corresponding + * to the module that "owns" this clock. This default code assumes + * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that + * the IDLEST register address ID corresponds to the CM_*CLKEN + * register address ID (e.g., that CM_FCLKEN2 corresponds to + * CM_IDLEST2). This is not true for all modules. No return value. 
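The FCLKEN<->ICLKEN conversion in omap2_clk_dflt_find_companion() works because the two register banks differ in exactly one address bit (CM_FCLKEN at offset 0x00, CM_ICLKEN at 0x10), so a single XOR with (CM_FCLKEN ^ CM_ICLKEN) flips an address in either direction. A worked example with illustrative OMAP3 CORE_CM addresses:

#include <stdio.h>

#define CM_FCLKEN 0x0000
#define CM_ICLKEN 0x0010

int main(void)
{
	unsigned int fclken = 0x48004a00, iclken = 0x48004a10;
	unsigned int x = CM_FCLKEN ^ CM_ICLKEN;	/* 0x10 */

	/* same XOR in both directions, as omap2_clk_dflt_find_companion() */
	printf("0x%08x -> 0x%08x\n", fclken, fclken ^ x);	/* ...a10 */
	printf("0x%08x -> 0x%08x\n", iclken, iclken ^ x);	/* ...a00 */
	return 0;
}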
+ */ +void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); + *idlest_reg = (__force void __iomem *)r; + *idlest_bit = clk->enable_bit; + + /* + * 24xx uses 0 to indicate not ready, and 1 to indicate ready. + * 34xx reverses this, just to keep us on our toes + * AM35xx uses both, depending on the module. + */ + *idlest_val = ti_clk_get_features()->cm_idlest_val; +} + +/** + * omap2_dflt_clk_enable - enable a clock in the hardware + * @hw: struct clk_hw * of the clock to enable + * + * Enable the clock @hw in the hardware. We first call into the OMAP + * clockdomain code to "enable" the corresponding clockdomain if this + * is the first enabled user of the clockdomain. Then program the + * hardware to enable the clock. Then wait for the IP block that uses + * this clock to leave idle (if applicable). Returns the error value + * from clkdm_clk_enable() if it terminated with an error, or -EINVAL + * if @hw has a null clock enable_reg, or zero upon success. + */ +int omap2_dflt_clk_enable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk; + u32 v; + int ret = 0; + bool clkdm_control; + + if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) + clkdm_control = false; + else + clkdm_control = true; + + clk = to_clk_hw_omap(hw); + + if (clkdm_control && clk->clkdm) { + ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); + if (ret) { + WARN(1, + "%s: could not enable %s's clockdomain %s: %d\n", + __func__, clk_hw_get_name(hw), + clk->clkdm_name, ret); + return ret; + } + } + + if (unlikely(IS_ERR(clk->enable_reg))) { + pr_err("%s: %s missing enable_reg\n", __func__, + clk_hw_get_name(hw)); + ret = -EINVAL; + goto err; + } + + /* FIXME should not have INVERT_ENABLE bit here */ + v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + if (clk->flags & INVERT_ENABLE) + v &= ~(1 << clk->enable_bit); + else + v |= (1 << clk->enable_bit); + ti_clk_ll_ops->clk_writel(v, clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */ + + if (clk->ops && clk->ops->find_idlest) + _omap2_module_wait_ready(clk); + + return 0; + +err: + if (clkdm_control && clk->clkdm) + ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); + return ret; +} + +/** + * omap2_dflt_clk_disable - disable a clock in the hardware + * @hw: struct clk_hw * of the clock to disable + * + * Disable the clock @hw in the hardware, and call into the OMAP + * clockdomain code to "disable" the corresponding clockdomain if all + * clocks/hwmods in that clockdomain are now disabled. No return + * value. + */ +void omap2_dflt_clk_disable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk; + u32 v; + + clk = to_clk_hw_omap(hw); + if (IS_ERR(clk->enable_reg)) { + /* + * 'independent' here refers to a clock which is not + * controlled by its parent. + */ + pr_err("%s: independent clock %s has no enable_reg\n", + __func__, clk_hw_get_name(hw)); + return; + } + + v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + if (clk->flags & INVERT_ENABLE) + v |= (1 << clk->enable_bit); + else + v &= ~(1 << clk->enable_bit); + ti_clk_ll_ops->clk_writel(v, clk->enable_reg); + /* No OCP barrier needed here since it is a disable operation */ + + if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) && + clk->clkdm) + ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); +} + +/** + * omap2_dflt_clk_is_enabled - is clock enabled in the hardware? 
+ * @hw: struct clk_hw * to check + * + * Return 1 if the clock represented by @hw is enabled in the + * hardware, or 0 otherwise. Intended for use in the struct + * clk_ops.is_enabled function pointer. + */ +int omap2_dflt_clk_is_enabled(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + u32 v; + + v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + + if (clk->flags & INVERT_ENABLE) + v ^= BIT(clk->enable_bit); + + v &= BIT(clk->enable_bit); + + return v ? 1 : 0; +} + +const struct clk_hw_omap_ops clkhwops_wait = { + .find_idlest = omap2_clk_dflt_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c new file mode 100644 index 000000000..9023ca9ca --- /dev/null +++ b/drivers/clk/ti/clkt_dpll.c @@ -0,0 +1,370 @@ +/* + * OMAP2/3/4 DPLL clock functions + * + * Copyright (C) 2005-2008 Texas Instruments, Inc. + * Copyright (C) 2004-2010 Nokia Corporation + * + * Contacts: + * Richard Woodruff + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#undef DEBUG + +#include +#include +#include +#include +#include +#include + +#include + +#include "clock.h" + +/* DPLL rate rounding: minimum DPLL multiplier, divider values */ +#define DPLL_MIN_MULTIPLIER 2 +#define DPLL_MIN_DIVIDER 1 + +/* Possible error results from _dpll_test_mult */ +#define DPLL_MULT_UNDERFLOW -1 + +/* + * Scale factor to mitigate roundoff errors in DPLL rate rounding. + * The higher the scale factor, the greater the risk of arithmetic overflow, + * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR + * must be a power of DPLL_SCALE_BASE. + */ +#define DPLL_SCALE_FACTOR 64 +#define DPLL_SCALE_BASE 2 +#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \ + (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE)) + +/* + * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx. + * From device data manual section 4.3 "DPLL and DLL Specifications". + */ +#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000 +#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000 + +/* _dpll_test_fint() return codes */ +#define DPLL_FINT_UNDERFLOW -1 +#define DPLL_FINT_INVALID -2 + +/* Private functions */ + +/* + * _dpll_test_fint - test whether an Fint value is valid for the DPLL + * @clk: DPLL struct clk to test + * @n: divider value (N) to test + * + * Tests whether a particular divider @n will result in a valid DPLL + * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter + * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate + * (assuming that it is counting N upwards), or -2 if the enclosing loop + * should skip to the next iteration (again assuming N is increasing). 
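The XOR-then-mask in omap2_dflt_clk_is_enabled() normalizes INVERT_ENABLE clocks so that "enabled" always reads back as a set bit. A tiny truth-table check of the same logic:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static int is_enabled(uint32_t v, unsigned int bit, int invert)
{
	if (invert)
		v ^= BIT(bit);	/* flip so "enabled" always reads as 1 */
	return (v & BIT(bit)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", is_enabled(BIT(3), 3, 0));	/* set, normal -> 1 */
	printf("%d\n", is_enabled(BIT(3), 3, 1));	/* set, inverted -> 0 */
	printf("%d\n", is_enabled(0, 3, 1));		/* clear, inverted -> 1 */
	return 0;
}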
+ */ +static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) +{ + struct dpll_data *dd; + long fint, fint_min, fint_max; + int ret = 0; + + dd = clk->dpll_data; + + /* DPLL divider must result in a valid jitter correction val */ + fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n; + + if (dd->flags & DPLL_J_TYPE) { + fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN; + fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX; + } else { + fint_min = ti_clk_get_features()->fint_min; + fint_max = ti_clk_get_features()->fint_max; + } + + if (!fint_min || !fint_max) { + WARN(1, "No fint limits available!\n"); + return DPLL_FINT_INVALID; + } + + if (fint < ti_clk_get_features()->fint_min) { + pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n", + n); + dd->max_divider = n; + ret = DPLL_FINT_UNDERFLOW; + } else if (fint > ti_clk_get_features()->fint_max) { + pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n", + n); + dd->min_divider = n; + ret = DPLL_FINT_INVALID; + } else if (fint > ti_clk_get_features()->fint_band1_max && + fint < ti_clk_get_features()->fint_band2_min) { + pr_debug("rejecting n=%d due to Fint failure\n", n); + ret = DPLL_FINT_INVALID; + } + + return ret; +} + +static unsigned long _dpll_compute_new_rate(unsigned long parent_rate, + unsigned int m, unsigned int n) +{ + unsigned long long num; + + num = (unsigned long long)parent_rate * m; + do_div(num, n); + return num; +} + +/* + * _dpll_test_mult - test a DPLL multiplier value + * @m: pointer to the DPLL m (multiplier) value under test + * @n: current DPLL n (divider) value under test + * @new_rate: pointer to storage for the resulting rounded rate + * @target_rate: the desired DPLL rate + * @parent_rate: the DPLL's parent clock rate + * + * This code tests a DPLL multiplier value, ensuring that the + * resulting rate will not be higher than the target_rate, and that + * the multiplier value itself is valid for the DPLL. Initially, the + * integer pointed to by the m argument should be prescaled by + * multiplying by DPLL_SCALE_FACTOR. The code will replace this with + * a non-scaled m upon return. This non-scaled m will result in a + * new_rate as close as possible to target_rate (but not greater than + * target_rate) given the current (parent_rate, n, prescaled m) + * triple. Returns DPLL_MULT_UNDERFLOW in the event that the + * non-scaled m attempted to underflow, which can allow the calling + * function to bail out early; or 0 upon success. + */ +static int _dpll_test_mult(int *m, int n, unsigned long *new_rate, + unsigned long target_rate, + unsigned long parent_rate) +{ + int r = 0, carry = 0; + + /* Unscale m and round if necessary */ + if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL) + carry = 1; + *m = (*m / DPLL_SCALE_FACTOR) + carry; + + /* + * The new rate must be <= the target rate to avoid programming + * a rate that is impossible for the hardware to handle + */ + *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); + if (*new_rate > target_rate) { + (*m)--; + *new_rate = 0; + } + + /* Guard against m underflow */ + if (*m < DPLL_MIN_MULTIPLIER) { + *m = DPLL_MIN_MULTIPLIER; + *new_rate = 0; + r = DPLL_MULT_UNDERFLOW; + } + + if (*new_rate == 0) + *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); + + return r; +} + +/** + * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not + * @v: bitfield value of the DPLL enable + * + * Checks given DPLL enable bitfield to see whether the DPLL is in bypass + * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise. 
+ */ +static int _omap2_dpll_is_in_bypass(u32 v) +{ + u8 mask, val; + + mask = ti_clk_get_features()->dpll_bypass_vals; + + /* + * Each set bit in the mask corresponds to a bypass value equal + * to the bitshift. Go through each set-bit in the mask and + * compare against the given register value. + */ + while (mask) { + val = __ffs(mask); + mask ^= (1 << val); + if (v == val) + return 1; + } + + return 0; +} + +/* Public functions */ +u8 omap2_init_dpll_parent(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + u32 v; + struct dpll_data *dd; + + dd = clk->dpll_data; + if (!dd) + return -EINVAL; + + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v &= dd->enable_mask; + v >>= __ffs(dd->enable_mask); + + /* Reparent the struct clk in case the dpll is in bypass */ + if (_omap2_dpll_is_in_bypass(v)) + return 1; + + return 0; +} + +/** + * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate + * @clk: struct clk * of a DPLL + * + * DPLLs can be locked or bypassed - basically, enabled or disabled. + * When locked, the DPLL output depends on the M and N values. When + * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock + * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and + * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively + * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk. + * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is + * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0 + * if the clock @clk is not a DPLL. + */ +unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) +{ + long long dpll_clk; + u32 dpll_mult, dpll_div, v; + struct dpll_data *dd; + + dd = clk->dpll_data; + if (!dd) + return 0; + + /* Return bypass rate if DPLL is bypassed */ + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v &= dd->enable_mask; + v >>= __ffs(dd->enable_mask); + + if (_omap2_dpll_is_in_bypass(v)) + return clk_get_rate(dd->clk_bypass); + + v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + dpll_mult = v & dd->mult_mask; + dpll_mult >>= __ffs(dd->mult_mask); + dpll_div = v & dd->div1_mask; + dpll_div >>= __ffs(dd->div1_mask); + + dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult; + do_div(dpll_clk, dpll_div + 1); + + return dpll_clk; +} + +/* DPLL rate rounding code */ + +/** + * omap2_dpll_round_rate - round a target rate for an OMAP DPLL + * @clk: struct clk * for a DPLL + * @target_rate: desired DPLL clock rate + * + * Given a DPLL and a desired target rate, round the target rate to a + * possible, programmable rate for this DPLL. Attempts to select the + * minimum possible n. Stores the computed (m, n) in the DPLL's + * dpll_data structure so set_rate() will not need to call this + * (expensive) function again. Returns ~0 if the target rate cannot + * be rounded, or the rounded rate upon success. 
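When locked, omap2_get_dpll_rate() computes CLKOUT = ref * M / (N + 1), keeping the multiply in a 64-bit intermediate (do_div() in the kernel) so it cannot overflow for large reference rates. A worked example with plausible register values; the numbers are illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ref = 26000000;	/* 26 MHz sys_clk, hypothetical */
	uint32_t m = 332, n = 12;	/* example M/N register values */

	/* CLKOUT = ref * M / (N + 1), as in omap2_get_dpll_rate() */
	uint64_t rate = ref * m;

	rate /= (n + 1);

	printf("%llu Hz\n", (unsigned long long)rate);	/* 664000000 */
	return 0;
}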
+ */ +long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, + unsigned long *parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + int m, n, r, scaled_max_m; + int min_delta_m = INT_MAX, min_delta_n = INT_MAX; + unsigned long scaled_rt_rp; + unsigned long new_rate = 0; + struct dpll_data *dd; + unsigned long ref_rate; + long delta; + long prev_min_delta = LONG_MAX; + const char *clk_name; + + if (!clk || !clk->dpll_data) + return ~0; + + dd = clk->dpll_data; + + ref_rate = clk_get_rate(dd->clk_ref); + clk_name = clk_hw_get_name(hw); + pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", + clk_name, target_rate); + + scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR); + scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR; + + dd->last_rounded_rate = 0; + + for (n = dd->min_divider; n <= dd->max_divider; n++) { + /* Is the (input clk, divider) pair valid for the DPLL? */ + r = _dpll_test_fint(clk, n); + if (r == DPLL_FINT_UNDERFLOW) + break; + else if (r == DPLL_FINT_INVALID) + continue; + + /* Compute the scaled DPLL multiplier, based on the divider */ + m = scaled_rt_rp * n; + + /* + * Since we're counting n up, a m overflow means we + * can bail out completely (since as n increases in + * the next iteration, there's no way that m can + * increase beyond the current m) + */ + if (m > scaled_max_m) + break; + + r = _dpll_test_mult(&m, n, &new_rate, target_rate, + ref_rate); + + /* m can't be set low enough for this n - try with a larger n */ + if (r == DPLL_MULT_UNDERFLOW) + continue; + + /* skip rates above our target rate */ + delta = target_rate - new_rate; + if (delta < 0) + continue; + + if (delta < prev_min_delta) { + prev_min_delta = delta; + min_delta_m = m; + min_delta_n = n; + } + + pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n", + clk_name, m, n, new_rate); + + if (delta == 0) + break; + } + + if (prev_min_delta == LONG_MAX) { + pr_debug("clock: %s: cannot round to rate %lu\n", + clk_name, target_rate); + return ~0; + } + + dd->last_rounded_m = min_delta_m; + dd->last_rounded_n = min_delta_n; + dd->last_rounded_rate = target_rate - prev_min_delta; + + return dd->last_rounded_rate; +} diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c new file mode 100644 index 000000000..38c36908c --- /dev/null +++ b/drivers/clk/ti/clkt_iclk.c @@ -0,0 +1,101 @@ +/* + * OMAP2/3 interface clock control + * + * Copyright (C) 2011 Nokia Corporation + * Paul Walmsley + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#undef DEBUG + +#include +#include +#include +#include + +#include "clock.h" + +/* Register offsets */ +#define OMAP24XX_CM_FCLKEN2 0x04 +#define CM_AUTOIDLE 0x30 +#define CM_ICLKEN 0x10 +#define CM_IDLEST 0x20 + +#define OMAP24XX_CM_IDLEST_VAL 0 + +/* Private functions */ + +/* XXX */ +void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk) +{ + u32 v; + void __iomem *r; + + r = (__force void __iomem *) + ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + + v = ti_clk_ll_ops->clk_readl(r); + v |= (1 << clk->enable_bit); + ti_clk_ll_ops->clk_writel(v, r); +} + +/* XXX */ +void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) +{ + u32 v; + void __iomem *r; + + r = (__force void __iomem *) + ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + + v = ti_clk_ll_ops->clk_readl(r); + v &= ~(1 << clk->enable_bit); + ti_clk_ll_ops->clk_writel(v, r); +} + +/** + * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS + * @clk: struct clk * being enabled + * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into + * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into + * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator + * + * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the + * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function + * passes back the correct CM_IDLEST register address for I2CHS + * modules. No return value. + */ +static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) +{ + u32 r; + + r = ((__force u32)clk->enable_reg ^ (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST)); + *idlest_reg = (__force void __iomem *)r; + *idlest_bit = clk->enable_bit; + *idlest_val = OMAP24XX_CM_IDLEST_VAL; +} + +/* Public data */ + +const struct clk_hw_omap_ops clkhwops_iclk = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, +}; + +const struct clk_hw_omap_ops clkhwops_iclk_wait = { + .allow_idle = omap2_clkt_iclk_allow_idle, + .deny_idle = omap2_clkt_iclk_deny_idle, + .find_idlest = omap2_clk_dflt_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; + +/* 2430 I2CHS has non-standard IDLEST register */ +const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = { + .find_idlest = omap2430_clk_i2chs_find_idlest, + .find_companion = omap2_clk_dflt_find_companion, +}; diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 404158d2d..90f3f472a 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -154,6 +154,35 @@ struct ti_clk_dpll { u8 recal_st_bit; }; +/* Composite clock component types */ +enum { + CLK_COMPONENT_TYPE_GATE = 0, + CLK_COMPONENT_TYPE_DIVIDER, + CLK_COMPONENT_TYPE_MUX, + CLK_COMPONENT_TYPE_MAX, +}; + +/** + * struct ti_dt_clk - OMAP DT clock alias declarations + * @lk: clock lookup definition + * @node_name: clock DT node to map to + */ +struct ti_dt_clk { + struct clk_lookup lk; + char *node_name; +}; + +#define DT_CLK(dev, con, name) \ + { \ + .lk = { \ + .dev_id = dev, \ + .con_id = con, \ + }, \ + .node_name = name, \ + } + +typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *); + struct clk *ti_clk_register_gate(struct ti_clk *setup); struct clk *ti_clk_register_interface(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); @@ -169,4 +198,80 @@ void ti_clk_patch_legacy_clks(struct ti_clk **patch); struct clk *ti_clk_register_clk(struct ti_clk *setup); int ti_clk_register_legacy_clks(struct 
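The interface-clock autoidle helpers above derive the CM_AUTOIDLE address from the CM_ICLKEN address with the same XOR trick: CM_AUTOIDLE ^ CM_ICLKEN = 0x30 ^ 0x10 = 0x20, so the transform toggles a single address bit and works in both directions. For example:

#include <stdio.h>

#define CM_AUTOIDLE 0x30
#define CM_ICLKEN   0x10

int main(void)
{
	unsigned int iclken = 0x48004a10;	/* hypothetical CM_ICLKEN1 */

	/* CM_ICLKEN -> CM_AUTOIDLE, as omap2_clkt_iclk_allow_idle() does */
	unsigned int autoidle = iclken ^ (CM_AUTOIDLE ^ CM_ICLKEN);

	printf("0x%08x\n", autoidle);	/* 0x48004a30 */
	return 0;
}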
ti_clk_alias *clks); +void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); +void ti_dt_clocks_register(struct ti_dt_clk *oclks); +int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, + ti_of_clk_init_cb_t func); +int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); + +void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw); +int of_ti_clk_autoidle_setup(struct device_node *node); +void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); + +extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; +extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; +extern const struct clk_hw_omap_ops clkhwops_wait; +extern const struct clk_hw_omap_ops clkhwops_iclk; +extern const struct clk_hw_omap_ops clkhwops_iclk_wait; +extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait; +extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait; +extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait; +extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait; +extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait; +extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; +extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait; + +extern const struct clk_ops ti_clk_divider_ops; +extern const struct clk_ops ti_clk_mux_ops; + +int omap2_clkops_enable_clkdm(struct clk_hw *hw); +void omap2_clkops_disable_clkdm(struct clk_hw *hw); + +int omap2_dflt_clk_enable(struct clk_hw *hw); +void omap2_dflt_clk_disable(struct clk_hw *hw); +int omap2_dflt_clk_is_enabled(struct clk_hw *hw); +void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, + void __iomem **other_reg, + u8 *other_bit); +void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, + void __iomem **idlest_reg, + u8 *idlest_bit, u8 *idlest_val); + +void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk); +void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk); + +u8 omap2_init_dpll_parent(struct clk_hw *hw); +int omap3_noncore_dpll_enable(struct clk_hw *hw); +void omap3_noncore_dpll_disable(struct clk_hw *hw); +int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index); +int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); +int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, + u8 index); +int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req); +long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, + unsigned long *parent_rate); +unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, + unsigned long parent_rate); + +unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); +int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, + unsigned long parent_rate); +int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate, u8 index); +void omap3_clk_lock_dpll5(void); + +unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, + unsigned long parent_rate); +long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, + unsigned long target_rate, + unsigned long *parent_rate); +int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req); + +extern struct ti_clk_ll_ops *ti_clk_ll_ops; + #endif diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index b82ef07f3..b9bc3b8df 100644 --- 
a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -15,15 +15,94 @@ * GNU General Public License for more details. */ +#include #include #include #include #include #include +#include "clock.h" + #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ +/** + * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw + * @hw: struct clk_hw * of the clock being enabled + * + * Increment the usecount of the clockdomain of the clock pointed to + * by @hw; if the usecount is 1, the clockdomain will be "enabled." + * Only needed for clocks that don't use omap2_dflt_clk_enable() as + * their enable function pointer. Passes along the return value of + * clkdm_clk_enable(), -EINVAL if @hw is not associated with a + * clockdomain, or 0 if clock framework-based clockdomain control is + * not implemented. + */ +int omap2_clkops_enable_clkdm(struct clk_hw *hw) +{ + struct clk_hw_omap *clk; + int ret = 0; + + clk = to_clk_hw_omap(hw); + + if (unlikely(!clk->clkdm)) { + pr_err("%s: %s: no clkdm set ?!\n", __func__, + clk_hw_get_name(hw)); + return -EINVAL; + } + + if (unlikely(clk->enable_reg)) + pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, + clk_hw_get_name(hw)); + + if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { + pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", + __func__, clk_hw_get_name(hw)); + return 0; + } + + ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); + WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n", + __func__, clk_hw_get_name(hw), clk->clkdm_name, ret); + + return ret; +} + +/** + * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw + * @hw: struct clk_hw * of the clock being disabled + * + * Decrement the usecount of the clockdomain of the clock pointed to + * by @hw; if the usecount is 0, the clockdomain will be "disabled." + * Only needed for clocks that don't use omap2_dflt_clk_disable() as their + * disable function pointer. No return value. 
+ */ +void omap2_clkops_disable_clkdm(struct clk_hw *hw) +{ + struct clk_hw_omap *clk; + + clk = to_clk_hw_omap(hw); + + if (unlikely(!clk->clkdm)) { + pr_err("%s: %s: no clkdm set ?!\n", __func__, + clk_hw_get_name(hw)); + return; + } + + if (unlikely(clk->enable_reg)) + pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, + clk_hw_get_name(hw)); + + if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { + pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", + __func__, clk_hw_get_name(hw)); + return; + } + + ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); +} + static void __init of_ti_clockdomain_setup(struct device_node *node) { struct clk *clk; @@ -41,12 +120,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) __func__, node->full_name, i, PTR_ERR(clk)); continue; } - if (__clk_get_flags(clk) & CLK_IS_BASIC) { + clk_hw = __clk_get_hw(clk); + if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { pr_warn("can't setup clkdm for basic clk %s\n", __clk_get_name(clk)); continue; } - clk_hw = __clk_get_hw(clk); to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name; omap2_init_clk_clkdm(clk_hw); } diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 96f83cedb..dbef218fe 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -276,7 +276,6 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int num_parents; const char **parent_names; struct component_clk *clk; - int i; num_parents = of_clk_get_parent_count(node); @@ -289,8 +288,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw, if (!parent_names) return -ENOMEM; - for (i = 0; i < num_parents; i++) - parent_names[i] = of_clk_get_parent_name(node, i); + of_clk_parent_fill(node, parent_names, num_parents); clk = kzalloc(sizeof(*clk), GFP_KERNEL); if (!clk) { diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index ff5f11795..5b1726829 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -109,7 +109,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw, if (!div) { WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO), "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); return parent_rate; } @@ -155,7 +155,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, maxdiv = _get_maxdiv(divider); - if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { + if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; bestdiv = DIV_ROUND_UP(parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; @@ -181,7 +181,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, *best_parent_rate = parent_rate_saved; return i; } - parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), + parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), MULT_ROUND_UP(rate, i)); now = DIV_ROUND_UP(parent_rate, i); if (now <= rate && now > best) { @@ -194,7 +194,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!bestdiv) { bestdiv = _get_maxdiv(divider); *best_parent_rate = - __clk_round_rate(__clk_get_parent(hw->clk), 1); + clk_hw_round_rate(clk_hw_get_parent(hw), 1); } return bestdiv; diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 2aacf7a3b..5519b386e 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -15,6 +15,7 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include @@ -162,7 +163,7 @@ static void __init _register_dpll(struct clk_hw *hw, clk = clk_register(NULL, &clk_hw->hw); if (!IS_ERR(clk)) { - omap2_init_clk_hw_omap_clocks(clk); + omap2_init_clk_hw_omap_clocks(&clk_hw->hw); of_clk_add_provider(node, of_clk_src_simple_get, clk); kfree(clk_hw->hw.init->parent_names); kfree(clk_hw->hw.init); @@ -319,7 +320,7 @@ static void _register_dpll_x2(struct device_node *node, if (IS_ERR(clk)) { kfree(clk_hw); } else { - omap2_init_clk_hw_omap_clocks(clk); + omap2_init_clk_hw_omap_clocks(&clk_hw->hw); of_clk_add_provider(node, of_clk_src_simple_get, clk); } } @@ -341,7 +342,6 @@ static void __init of_ti_dpll_setup(struct device_node *node, struct clk_init_data *init = NULL; const char **parent_names = NULL; struct dpll_data *dd = NULL; - int i; u8 dpll_mode = 0; dd = kzalloc(sizeof(*dd), GFP_KERNEL); @@ -370,8 +370,7 @@ static void __init of_ti_dpll_setup(struct device_node *node, if (!parent_names) goto cleanup; - for (i = 0; i < init->num_parents; i++) - parent_names[i] = of_clk_get_parent_name(node, i); + of_clk_parent_fill(node, parent_names, init->num_parents); init->parent_names = parent_names; diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c new file mode 100644 index 000000000..f4dec00fb --- /dev/null +++ b/drivers/clk/ti/dpll3xxx.c @@ -0,0 +1,817 @@ +/* + * OMAP3/4 - specific DPLL control functions + * + * Copyright (C) 2009-2010 Texas Instruments, Inc. + * Copyright (C) 2009-2010 Nokia Corporation + * + * Written by Paul Walmsley + * Testing and integration fixes by Jouni Högander + * + * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth + * Menon + * + * Parts of this code are based on code written by + * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/clkdev.h> +#include <linux/clk/ti.h> + +#include "clock.h" + +/* CM_AUTOIDLE_PLL*.AUTO_* bit values */ +#define DPLL_AUTOIDLE_DISABLE 0x0 +#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1 + +#define MAX_DPLL_WAIT_TRIES 1000000 + +#define OMAP3XXX_EN_DPLL_LOCKED 0x7 + +/* Forward declarations */ +static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk); +static void omap3_dpll_deny_idle(struct clk_hw_omap *clk); +static void omap3_dpll_allow_idle(struct clk_hw_omap *clk); + +/* Private functions */ + +/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */ +static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) +{ + const struct dpll_data *dd; + u32 v; + + dd = clk->dpll_data; + + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v &= ~dd->enable_mask; + v |= clken_bits << __ffs(dd->enable_mask); + ti_clk_ll_ops->clk_writel(v, dd->control_reg); +} + +/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ +static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) +{ + const struct dpll_data *dd; + int i = 0; + int ret = -EINVAL; + const char *clk_name; + + dd = clk->dpll_data; + clk_name = clk_hw_get_name(&clk->hw); + + state <<= __ffs(dd->idlest_mask); + + while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) + != state) && i < MAX_DPLL_WAIT_TRIES) { + i++; + udelay(1); + } + + if (i == MAX_DPLL_WAIT_TRIES) { + pr_err("clock: %s failed transition to '%s'\n", + clk_name, (state) ? "locked" : "bypassed"); + } else { + pr_debug("clock: %s transition to '%s' in %d loops\n", + clk_name, (state) ? "locked" : "bypassed", i); + + ret = 0; + } + + return ret; +} + +/* From 3430 TRM ES2 4.7.6.2 */ +static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n) +{ + unsigned long fint; + u16 f = 0; + + fint = clk_get_rate(clk->dpll_data->clk_ref) / n; + + pr_debug("clock: fint is %lu\n", fint); + + if (fint >= 750000 && fint <= 1000000) + f = 0x3; + else if (fint > 1000000 && fint <= 1250000) + f = 0x4; + else if (fint > 1250000 && fint <= 1500000) + f = 0x5; + else if (fint > 1500000 && fint <= 1750000) + f = 0x6; + else if (fint > 1750000 && fint <= 2100000) + f = 0x7; + else if (fint > 7500000 && fint <= 10000000) + f = 0xB; + else if (fint > 10000000 && fint <= 12500000) + f = 0xC; + else if (fint > 12500000 && fint <= 15000000) + f = 0xD; + else if (fint > 15000000 && fint <= 17500000) + f = 0xE; + else if (fint > 17500000 && fint <= 21000000) + f = 0xF; + else + pr_debug("clock: unknown freqsel setting for %d\n", n); + + return f; +} + +/* + * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness + * @clk: pointer to a DPLL struct clk + * + * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report + * readiness before returning. Will save and restore the DPLL's + * autoidle state across the enable, per the CDP code. If the DPLL + * locked successfully, return 0; if the DPLL did not lock in the time + * allotted, or DPLL3 was passed in, return -EINVAL.
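+ * + * A typical caller is omap3_noncore_dpll_program() below, which first drops the DPLL into bypass, programs the new M and N values, and only then requests lock through this helper.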
+ */ +static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) +{ + const struct dpll_data *dd; + u8 ai; + u8 state = 1; + int r = 0; + + pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw)); + + dd = clk->dpll_data; + state <<= __ffs(dd->idlest_mask); + + /* Check if already locked */ + if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) == + state) + goto done; + + ai = omap3_dpll_autoidle_read(clk); + + if (ai) + omap3_dpll_deny_idle(clk); + + _omap3_dpll_write_clken(clk, DPLL_LOCKED); + + r = _omap3_wait_dpll_status(clk, 1); + + if (ai) + omap3_dpll_allow_idle(clk); + +done: + return r; +} + +/* + * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness + * @clk: pointer to a DPLL struct clk + * + * Instructs a non-CORE DPLL to enter low-power bypass mode. In + * bypass mode, the DPLL's rate is set equal to its parent clock's + * rate. Waits for the DPLL to report readiness before returning. + * Will save and restore the DPLL's autoidle state across the enable, + * per the CDP code. If the DPLL entered bypass mode successfully, + * return 0; if the DPLL did not enter bypass in the time allotted, or + * DPLL3 was passed in, or the DPLL does not support low-power bypass, + * return -EINVAL. + */ +static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk) +{ + int r; + u8 ai; + + if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) + return -EINVAL; + + pr_debug("clock: configuring DPLL %s for low-power bypass\n", + clk_hw_get_name(&clk->hw)); + + ai = omap3_dpll_autoidle_read(clk); + + _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS); + + r = _omap3_wait_dpll_status(clk, 0); + + if (ai) + omap3_dpll_allow_idle(clk); + + return r; +} + +/* + * _omap3_noncore_dpll_stop - instruct a DPLL to stop + * @clk: pointer to a DPLL struct clk + * + * Instructs a non-CORE DPLL to enter low-power stop. Will save and + * restore the DPLL's autoidle state across the stop, per the CDP + * code. If DPLL3 was passed in, or the DPLL does not support + * low-power stop, return -EINVAL; otherwise, return 0. + */ +static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk) +{ + u8 ai; + + if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP))) + return -EINVAL; + + pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw)); + + ai = omap3_dpll_autoidle_read(clk); + + _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP); + + if (ai) + omap3_dpll_allow_idle(clk); + + return 0; +} + +/** + * _lookup_dco - Lookup DCO used by j-type DPLL + * @clk: pointer to a DPLL struct clk + * @dco: digital control oscillator selector + * @m: DPLL multiplier to set + * @n: DPLL divider to set + * + * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" + * + * XXX This code is not needed for 3430/AM35xx; can it be optimized + * out in non-multi-OMAP builds for those chips? + */ +static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n) +{ + unsigned long fint, clkinp; /* watch out for overflow */ + + clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)); + fint = (clkinp / n) * m; + + if (fint < 1000000000) + *dco = 2; + else + *dco = 4; +} + +/** + * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL + * @clk: pointer to a DPLL struct clk + * @sd_div: target sigma-delta divider + * @m: DPLL multiplier to set + * @n: DPLL divider to set + * + * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" + * + * XXX This code is not needed for 3430/AM35xx; can it be optimized + * out in non-multi-OMAP builds for those chips? 
+ */ +static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n) +{ + unsigned long clkinp, sd; /* watch out for overflow */ + int mod1, mod2; + + clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)); + + /* + * target sigma-delta to near 250MHz + * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)] + */ + clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */ + mod1 = (clkinp * m) % (250 * n); + sd = (clkinp * m) / (250 * n); + mod2 = sd % 10; + sd /= 10; + + if (mod1 || mod2) + sd++; + *sd_div = sd; +} + +/* + * omap3_noncore_dpll_program - set non-core DPLL M,N values directly + * @clk: struct clk_hw_omap * of the DPLL to set + * @freqsel: FREQSEL value to set + * + * Program the DPLL with the last M, N values calculated, and wait for + * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success. + */ +static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) +{ + struct dpll_data *dd = clk->dpll_data; + u8 dco, sd_div; + u32 v; + + /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */ + _omap3_noncore_dpll_bypass(clk); + + /* + * Set jitter correction. Jitter correction applicable for OMAP343X + * only since freqsel field is no longer present on other devices. + */ + if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v &= ~dd->freqsel_mask; + v |= freqsel << __ffs(dd->freqsel_mask); + ti_clk_ll_ops->clk_writel(v, dd->control_reg); + } + + /* Set DPLL multiplier, divider */ + v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + + /* Handle Duty Cycle Correction */ + if (dd->dcc_mask) { + if (dd->last_rounded_rate >= dd->dcc_rate) + v |= dd->dcc_mask; /* Enable DCC */ + else + v &= ~dd->dcc_mask; /* Disable DCC */ + } + + v &= ~(dd->mult_mask | dd->div1_mask); + v |= dd->last_rounded_m << __ffs(dd->mult_mask); + v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); + + /* Configure dco and sd_div for dplls that have these fields */ + if (dd->dco_mask) { + _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n); + v &= ~(dd->dco_mask); + v |= dco << __ffs(dd->dco_mask); + } + if (dd->sddiv_mask) { + _lookup_sddiv(clk, &sd_div, dd->last_rounded_m, + dd->last_rounded_n); + v &= ~(dd->sddiv_mask); + v |= sd_div << __ffs(dd->sddiv_mask); + } + + ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg); + + /* Set 4X multiplier and low-power mode */ + if (dd->m4xen_mask || dd->lpmode_mask) { + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + + if (dd->m4xen_mask) { + if (dd->last_rounded_m4xen) + v |= dd->m4xen_mask; + else + v &= ~dd->m4xen_mask; + } + + if (dd->lpmode_mask) { + if (dd->last_rounded_lpmode) + v |= dd->lpmode_mask; + else + v &= ~dd->lpmode_mask; + } + + ti_clk_ll_ops->clk_writel(v, dd->control_reg); + } + + /* We let the clock framework set the other output dividers later */ + + /* REVISIT: Set ramp-up delay? */ + + _omap3_noncore_dpll_lock(clk); + + return 0; +} + +/* Public functions */ + +/** + * omap3_dpll_recalc - recalculate DPLL rate + * @hw: struct clk_hw * of the DPLL + * + * Recalculate and propagate the DPLL rate. + */ +unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + + return omap2_get_dpll_rate(clk); +} + +/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */ + +/** + * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode + * @hw: pointer to a DPLL struct clk_hw + * + * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
+ * The choice of modes depends on the DPLL's programmed rate: if it is + * the same as the DPLL's parent clock, it will enter bypass; + * otherwise, it will enter lock. This code will wait for the DPLL to + * indicate readiness before returning, unless the DPLL takes too long + * to enter the target state. Intended to be used as the struct clk's + * enable function. If DPLL3 was passed in, or the DPLL does not + * support low-power stop, or if the DPLL took too long to enter + * bypass or lock, return -EINVAL; otherwise, return 0. + */ +int omap3_noncore_dpll_enable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + int r; + struct dpll_data *dd; + struct clk_hw *parent; + + dd = clk->dpll_data; + if (!dd) + return -EINVAL; + + if (clk->clkdm) { + r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); + if (r) { + WARN(1, + "%s: could not enable %s's clockdomain %s: %d\n", + __func__, clk_hw_get_name(hw), + clk->clkdm_name, r); + return r; + } + } + + parent = clk_hw_get_parent(hw); + + if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { + WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); + r = _omap3_noncore_dpll_bypass(clk); + } else { + WARN_ON(parent != __clk_get_hw(dd->clk_ref)); + r = _omap3_noncore_dpll_lock(clk); + } + + return r; +} + +/** + * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop + * @clk: pointer to a DPLL struct clk + * + * Instructs a non-CORE DPLL to enter low-power stop. This function is + * intended for use in struct clkops. No return value. + */ +void omap3_noncore_dpll_disable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + + _omap3_noncore_dpll_stop(clk); + if (clk->clkdm) + ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); +} + +/* Non-CORE DPLL rate set code */ + +/** + * omap3_noncore_dpll_determine_rate - determine rate for a DPLL + * @hw: pointer to the clock to determine rate for + * @req: target rate request + * + * Determines which DPLL mode to use for reaching a desired target rate. + * Checks whether the DPLL shall be in bypass or locked mode, and if + * locked, calculates the M,N values for the DPLL via round-rate. + * Returns a 0 on success, negative error value in failure. + */ +int omap3_noncore_dpll_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + + if (!req->rate) + return -EINVAL; + + dd = clk->dpll_data; + if (!dd) + return -EINVAL; + + if (clk_get_rate(dd->clk_bypass) == req->rate && + (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { + req->best_parent_hw = __clk_get_hw(dd->clk_bypass); + } else { + req->rate = omap2_dpll_round_rate(hw, req->rate, + &req->best_parent_rate); + req->best_parent_hw = __clk_get_hw(dd->clk_ref); + } + + req->best_parent_rate = req->rate; + + return 0; +} + +/** + * omap3_noncore_dpll_set_parent - set parent for a DPLL clock + * @hw: pointer to the clock to set parent for + * @index: parent index to select + * + * Sets parent for a DPLL clock. This sets the DPLL into bypass or + * locked mode. Returns 0 with success, negative error value otherwise. 
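+ * + * For example, a clk_set_parent() call that selects the bypass parent (index 1) drops the DPLL into low-power bypass, while selecting the reference parent (index 0) requests lock.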
+ */ +int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + int ret; + + if (!hw) + return -EINVAL; + + if (index) + ret = _omap3_noncore_dpll_bypass(clk); + else + ret = _omap3_noncore_dpll_lock(clk); + + return ret; +} + +/** + * omap3_noncore_dpll_set_rate - set rate for a DPLL clock + * @hw: pointer to the clock to set parent for + * @rate: target rate for the clock + * @parent_rate: rate of the parent clock + * + * Sets rate for a DPLL clock. First checks if the clock parent is + * reference clock (in bypass mode, the rate of the clock can't be + * changed) and proceeds with the rate change operation. Returns 0 + * with success, negative error value otherwise. + */ +int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + u16 freqsel = 0; + int ret; + + if (!hw || !rate) + return -EINVAL; + + dd = clk->dpll_data; + if (!dd) + return -EINVAL; + + if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref)) + return -EINVAL; + + if (dd->last_rounded_rate == 0) + return -EINVAL; + + /* Freqsel is available only on OMAP343X devices */ + if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { + freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); + WARN_ON(!freqsel); + } + + pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, + clk_hw_get_name(hw), rate); + + ret = omap3_noncore_dpll_program(clk, freqsel); + + return ret; +} + +/** + * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock + * @hw: pointer to the clock to set rate and parent for + * @rate: target rate for the DPLL + * @parent_rate: clock rate of the DPLL parent + * @index: new parent index for the DPLL, 0 - reference, 1 - bypass + * + * Sets rate and parent for a DPLL clock. If new parent is the bypass + * clock, only selects the parent. Otherwise proceeds with a rate + * change, as this will effectively also change the parent as the + * DPLL is put into locked mode. Returns 0 with success, negative error + * value otherwise. + */ +int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, + u8 index) +{ + int ret; + + if (!hw || !rate) + return -EINVAL; + + /* + * clk-ref at index[0], in which case we only need to set rate, + * the parent will be changed automatically with the lock sequence. + * With clk-bypass case we only need to change parent. + */ + if (index) + ret = omap3_noncore_dpll_set_parent(hw, index); + else + ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate); + + return ret; +} + +/* DPLL autoidle read/set code */ + +/** + * omap3_dpll_autoidle_read - read a DPLL's autoidle bits + * @clk: struct clk * of the DPLL to read + * + * Return the DPLL's autoidle bits, shifted down to bit 0. Returns + * -EINVAL if passed a null pointer or if the struct clk does not + * appear to refer to a DPLL. + */ +static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) +{ + const struct dpll_data *dd; + u32 v; + + if (!clk || !clk->dpll_data) + return -EINVAL; + + dd = clk->dpll_data; + + if (!dd->autoidle_reg) + return -EINVAL; + + v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v &= dd->autoidle_mask; + v >>= __ffs(dd->autoidle_mask); + + return v; +} + +/** + * omap3_dpll_allow_idle - enable DPLL autoidle bits + * @clk: struct clk * of the DPLL to operate on + * + * Enable DPLL automatic idle control. 
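+ * (The AUTO_* field is set to DPLL_AUTOIDLE_LOW_POWER_STOP, i.e. 0x1, by the write below.)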
This automatic idle mode + * switching takes effect only when the DPLL is locked, at least on + * OMAP3430. The DPLL will enter low-power stop when its downstream + * clocks are gated. No return value. + */ +static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) +{ + const struct dpll_data *dd; + u32 v; + + if (!clk || !clk->dpll_data) + return; + + dd = clk->dpll_data; + + if (!dd->autoidle_reg) + return; + + /* + * REVISIT: CORE DPLL can optionally enter low-power bypass + * by writing 0x5 instead of 0x1. Add some mechanism to + * optionally enter this mode. + */ + v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v &= ~dd->autoidle_mask; + v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); + ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); +} + +/** + * omap3_dpll_deny_idle - prevent DPLL from automatically idling + * @clk: struct clk * of the DPLL to operate on + * + * Disable DPLL automatic idle control. No return value. + */ +static void omap3_dpll_deny_idle(struct clk_hw_omap *clk) +{ + const struct dpll_data *dd; + u32 v; + + if (!clk || !clk->dpll_data) + return; + + dd = clk->dpll_data; + + if (!dd->autoidle_reg) + return; + + v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v &= ~dd->autoidle_mask; + v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); + ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); +} + +/* Clock control for DPLL outputs */ + +/* Find the parent DPLL for the given clkoutx2 clock */ +static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) +{ + struct clk_hw_omap *pclk = NULL; + + /* Walk up the parents of clk, looking for a DPLL */ + do { + do { + hw = clk_hw_get_parent(hw); + } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC)); + if (!hw) + break; + pclk = to_clk_hw_omap(hw); + } while (pclk && !pclk->dpll_data); + + /* clk does not have a DPLL as a parent? error in the clock data */ + if (!pclk) { + WARN_ON(1); + return NULL; + } + + return pclk; +} + +/** + * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate + * @clk: DPLL output struct clk + * + * Using parent clock DPLL data, look up DPLL state. If locked, set our + * rate to the dpll_clk * 2; otherwise, just use dpll_clk. + */ +unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + const struct dpll_data *dd; + unsigned long rate; + u32 v; + struct clk_hw_omap *pclk = NULL; + + if (!parent_rate) + return 0; + + pclk = omap3_find_clkoutx2_dpll(hw); + + if (!pclk) + return 0; + + dd = pclk->dpll_data; + + WARN_ON(!dd->enable_mask); + + v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask; + v >>= __ffs(dd->enable_mask); + if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) + rate = parent_rate; + else + rate = parent_rate * 2; + return rate; +} + +/* OMAP3/4 non-CORE DPLL clkops */ +const struct clk_hw_omap_ops clkhwops_omap3_dpll = { + .allow_idle = omap3_dpll_allow_idle, + .deny_idle = omap3_dpll_deny_idle, +}; + +/** + * omap3_dpll4_set_rate - set rate for omap3 per-dpll + * @hw: clock to change + * @rate: target rate for clock + * @parent_rate: rate of the parent clock + * + * Check if the current SoC supports the per-dpll reprogram operation + * or not, and then do the rate change if supported. Returns -EINVAL + * if not supported, 0 for success, and potential error codes from the + * clock rate change. 
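+ * + * On affected 3430ES1 parts the call fails up front with -EINVAL; on later revisions it simply falls through to omap3_noncore_dpll_set_rate().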
+ */ +int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + /* + * According to the 12-5 CDP code from TI, "Limitation 2.5" + * on 3430ES1 prevents us from changing DPLL multipliers or dividers + * on DPLL4. + */ + if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) { + pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); + return -EINVAL; + } + + return omap3_noncore_dpll_set_rate(hw, rate, parent_rate); +} + +/** + * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll + * @hw: clock to change + * @rate: target rate for clock + * @parent_rate: rate of the parent clock + * @index: parent index, 0 - reference clock, 1 - bypass clock + * + * Check if the current SoC supports the per-dpll reprogram operation + * or not, and then do the rate + parent change if supported. Returns + * -EINVAL if not supported, 0 for success, and potential error codes + * from the clock rate change. + */ +int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate, u8 index) +{ + if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) { + pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); + return -EINVAL; + } + + return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate, + index); +} diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c new file mode 100644 index 000000000..660d7436a --- /dev/null +++ b/drivers/clk/ti/dpll44xx.c @@ -0,0 +1,227 @@ +/* + * OMAP4-specific DPLL control functions + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Rajendra Nayak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/clk/ti.h> + +#include "clock.h" + +/* + * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that + * can be supported when using the DPLL low-power mode. Frequencies are + * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control, + * Status, and Low-Power Operation Mode". + */ +#define OMAP4_DPLL_LP_FINT_MAX 1000000 +#define OMAP4_DPLL_LP_FOUT_MAX 100000000 + +/* + * Bitfield declarations + */ +#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK BIT(8) +#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK BIT(10) +#define OMAP4430_DPLL_REGM4XEN_MASK BIT(11) + +/* Static rate multiplier for OMAP4 REGM4XEN clocks */ +#define OMAP4430_REGM4XEN_MULT 4 + +static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk) +{ + u32 v; + u32 mask; + + if (!clk || !clk->clksel_reg) + return; + + mask = clk->flags & CLOCK_CLKOUTX2 ? + OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : + OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; + + v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + /* Clear the bit to allow gatectrl */ + v &= ~mask; + ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); +} + +static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) +{ + u32 v; + u32 mask; + + if (!clk || !clk->clksel_reg) + return; + + mask = clk->flags & CLOCK_CLKOUTX2 ?
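+ /* x2 outputs gate via bit 10, x1 outputs via bit 8 */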
+ OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : + OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; + + v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + /* Set the bit to deny gatectrl */ + v |= mask; + ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); +} + +const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = { + .allow_idle = omap4_dpllmx_allow_gatectrl, + .deny_idle = omap4_dpllmx_deny_gatectrl, +}; + +/** + * omap4_dpll_lpmode_recalc - compute DPLL low-power setting + * @dd: pointer to the dpll data structure + * + * Calculates if low-power mode can be enabled based upon the last + * multiplier and divider values calculated. If low-power mode can be + * enabled, then the bit to enable low-power mode is stored in the + * last_rounded_lpmode variable. This implementation is based upon the + * criteria for enabling low-power mode as described in the OMAP4430/60 + * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power + * Operation Mode". + */ +static void omap4_dpll_lpmode_recalc(struct dpll_data *dd) +{ + long fint, fout; + + fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); + fout = fint * dd->last_rounded_m; + + if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX)) + dd->last_rounded_lpmode = 1; + else + dd->last_rounded_lpmode = 0; +} + +/** + * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit + * @clk: struct clk * of the DPLL to compute the rate for + * + * Compute the output rate for the OMAP4 DPLL represented by @clk. + * Takes the REGM4XEN bit into consideration, which is needed for the + * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers) + * upon success, or 0 upon error. + */ +unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + u32 v; + unsigned long rate; + struct dpll_data *dd; + + if (!clk || !clk->dpll_data) + return 0; + + dd = clk->dpll_data; + + rate = omap2_get_dpll_rate(clk); + + /* regm4xen adds a multiplier of 4 to DPLL calculations */ + v = ti_clk_ll_ops->clk_readl(dd->control_reg); + if (v & OMAP4430_DPLL_REGM4XEN_MASK) + rate *= OMAP4430_REGM4XEN_MULT; + + return rate; +} + +/** + * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit + * @clk: struct clk * of the DPLL to round a rate for + * @target_rate: the desired rate of the DPLL + * + * Compute the rate that would be programmed into the DPLL hardware + * for @clk if set_rate() were to be provided with the rate + * @target_rate. Takes the REGM4XEN bit into consideration, which is + * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before + * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or + * ~0 if an error occurred in omap2_dpll_round_rate(). + */ +long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, + unsigned long target_rate, + unsigned long *parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + long r; + + if (!clk || !clk->dpll_data) + return -EINVAL; + + dd = clk->dpll_data; + + dd->last_rounded_m4xen = 0; + + /* + * First try to compute the DPLL configuration for + * target rate without using the 4X multiplier. + */ + r = omap2_dpll_round_rate(hw, target_rate, NULL); + if (r != ~0) + goto out; + + /* + * If we did not find a valid DPLL configuration, try again, but + * this time see if using the 4X multiplier can help. Enabling the + * 4X multiplier is equivalent to dividing the target rate by 4. 
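+ * For instance, a 196.608 MHz request that cannot be met directly is retried here as 49.152 MHz, and the rounded result is scaled back up by OMAP4430_REGM4XEN_MULT below.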
+ */ + r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT, + NULL); + if (r == ~0) + return r; + + dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT; + dd->last_rounded_m4xen = 1; + +out: + omap4_dpll_lpmode_recalc(dd); + + return dd->last_rounded_rate; +} + +/** + * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL + * @hw: pointer to the clock to determine rate for + * @req: target rate request + * + * Determines which DPLL mode to use for reaching a desired rate. + * Checks whether the DPLL shall be in bypass or locked mode, and if + * locked, calculates the M,N values for the DPLL via round-rate. + * Returns 0 on success and a negative error value otherwise. + */ +int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + + if (!req->rate) + return -EINVAL; + + dd = clk->dpll_data; + if (!dd) + return -EINVAL; + + if (clk_get_rate(dd->clk_bypass) == req->rate && + (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { + req->best_parent_hw = __clk_get_hw(dd->clk_bypass); + } else { + req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate, + &req->best_parent_rate); + req->best_parent_hw = __clk_get_hw(dd->clk_ref); + } + + req->best_parent_rate = req->rate; + + return 0; +} diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 730aa6245..f4b2e9888 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -9,6 +9,7 @@ * GNU General Public License for more details. */ +#include #include #include #include @@ -558,8 +559,7 @@ static void __init ti_fapll_setup(struct device_node *node) goto free; } - parent_name[0] = of_clk_get_parent_name(node, 0); - parent_name[1] = of_clk_get_parent_name(node, 1); + of_clk_parent_fill(node, parent_name, 2); init->parent_names = parent_name; fd->clk_ref = of_clk_get(node, 0); diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c index c2c8a2874..3cd406768 100644 --- a/drivers/clk/ti/fixed-factor.c +++ b/drivers/clk/ti/fixed-factor.c @@ -22,6 +22,8 @@ #include #include +#include "clock.h" + #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index 0c6fdfcd5..5429d3534 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -62,7 +62,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = { * (Any other value different from the Read value) to the * corresponding CM_CLKSEL register will refresh the dividers. 
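 * This is what the "Restore the dividers" step in the enable hook below relies on.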
*/ -static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk) +static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) { struct clk_divider *parent; struct clk_hw *parent_hw; @@ -70,10 +70,10 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk) int ret; /* Clear PWRDN bit of HSDIVIDER */ - ret = omap2_dflt_clk_enable(clk); + ret = omap2_dflt_clk_enable(hw); /* Parent is the x2 node, get parent of parent for the m2 div */ - parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk))); + parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw)); parent = to_clk_divider(parent_hw); /* Restore the dividers */ diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index c76230d8d..e505e6f82 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -63,7 +63,7 @@ static struct clk *_register_interface(struct device *dev, const char *name, if (IS_ERR(clk)) kfree(clk_hw); else - omap2_init_clk_hw_omap_clocks(clk); + omap2_init_clk_hw_omap_clocks(&clk_hw->hw); return clk; } diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 5cdeed538..69f08a1d0 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -31,7 +31,7 @@ static u8 ti_clk_mux_get_parent(struct clk_hw *hw) { struct clk_mux *mux = to_clk_mux(hw); - int num_parents = __clk_get_num_parents(hw->clk); + int num_parents = clk_hw_get_num_parents(hw); u32 val; /* @@ -190,7 +190,6 @@ static void of_mux_clk_setup(struct device_node *node) void __iomem *reg; int num_parents; const char **parent_names; - int i; u8 clk_mux_flags = 0; u32 mask = 0; u32 shift = 0; @@ -205,8 +204,7 @@ static void of_mux_clk_setup(struct device_node *node) if (!parent_names) goto cleanup; - for (i = 0; i < num_parents; i++) - parent_names[i] = of_clk_get_parent_name(node, i); + of_clk_parent_fill(node, parent_names, num_parents); reg = ti_clk_get_reg_addr(node, 0); diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile index 521483f0b..f3baef298 100644 --- a/drivers/clk/ux500/Makefile +++ b/drivers/clk/ux500/Makefile @@ -9,7 +9,6 @@ obj-y += clk-sysctrl.o # Clock definitions obj-y += u8500_of_clk.o -obj-y += u8500_clk.o obj-y += u9540_clk.o obj-y += u8540_clk.o diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c index 3e5e05101..222425d08 100644 --- a/drivers/clk/ux500/abx500-clk.c +++ b/drivers/clk/ux500/abx500-clk.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c index bf63c96ac..7f343821f 100644 --- a/drivers/clk/ux500/clk-prcmu.c +++ b/drivers/clk/ux500/clk-prcmu.c @@ -43,7 +43,7 @@ static void clk_prcmu_unprepare(struct clk_hw *hw) struct clk_prcmu *clk = to_clk_prcmu(hw); if (prcmu_request_clock(clk->cg_sel, false)) pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); else clk->is_prepared = 0; } @@ -101,11 +101,11 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw) if (!clk->opp_requested) { err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, - (char *)__clk_get_name(hw->clk), + (char *)clk_hw_get_name(hw), 100); if (err) { pr_err("clk_prcmu: %s fail req APE OPP for %s.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return err; } clk->opp_requested = 1; @@ -114,7 +114,7 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw) err = prcmu_request_clock(clk->cg_sel, true); if (err) { 
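/* Roll back the APE OPP requirement added above before bailing out */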
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, - (char *)__clk_get_name(hw->clk)); + (char *)clk_hw_get_name(hw)); clk->opp_requested = 0; return err; } @@ -129,13 +129,13 @@ static void clk_prcmu_opp_unprepare(struct clk_hw *hw) if (prcmu_request_clock(clk->cg_sel, false)) { pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); return; } if (clk->opp_requested) { prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, - (char *)__clk_get_name(hw->clk)); + (char *)clk_hw_get_name(hw)); clk->opp_requested = 0; } @@ -151,7 +151,7 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw) err = prcmu_request_ape_opp_100_voltage(true); if (err) { pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); return err; } clk->opp_requested = 1; @@ -174,7 +174,7 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw) if (prcmu_request_clock(clk->cg_sel, false)) { pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, - __clk_get_name(hw->clk)); + clk_hw_get_name(hw)); return; } diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c index e364c9d4a..266ddea63 100644 --- a/drivers/clk/ux500/clk-sysctrl.c +++ b/drivers/clk/ux500/clk-sysctrl.c @@ -52,7 +52,7 @@ static void clk_sysctrl_unprepare(struct clk_hw *hw) struct clk_sysctrl *clk = to_clk_sysctrl(hw); if (ab8500_sysctrl_clear(clk->reg_sel[0], clk->reg_mask[0])) dev_err(clk->dev, "clk_sysctrl: %s fail to clear %s.\n", - __func__, __clk_get_name(hw->clk)); + __func__, clk_hw_get_name(hw)); } static unsigned long clk_sysctrl_recalc_rate(struct clk_hw *hw, diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h index a2bb92d85..b42485da7 100644 --- a/drivers/clk/ux500/clk.h +++ b/drivers/clk/ux500/clk.h @@ -10,10 +10,11 @@ #ifndef __UX500_CLK_H #define __UX500_CLK_H -#include #include #include +struct clk; + struct clk *clk_reg_prcc_pclk(const char *name, const char *parent_name, resource_size_t phy_base, diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c deleted file mode 100644 index 4626b97b7..000000000 --- a/drivers/clk/ux500/u8500_clk.c +++ /dev/null @@ -1,526 +0,0 @@ -/* - * Clock definitions for u8500 platform. - * - * Copyright (C) 2012 ST-Ericsson SA - * Author: Ulf Hansson - * - * License terms: GNU General Public License (GPL) version 2 - */ - -#include -#include -#include -#include -#include -#include "clk.h" - -void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, - u32 clkrst5_base, u32 clkrst6_base) -{ - struct prcmu_fw_version *fw_version; - const char *sgaclk_parent = NULL; - struct clk *clk; - - /* Clock sources */ - clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0, - CLK_IS_ROOT|CLK_IGNORE_UNUSED); - clk_register_clkdev(clk, "soc0_pll", NULL); - - clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1, - CLK_IS_ROOT|CLK_IGNORE_UNUSED); - clk_register_clkdev(clk, "soc1_pll", NULL); - - clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR, - CLK_IS_ROOT|CLK_IGNORE_UNUSED); - clk_register_clkdev(clk, "ddr_pll", NULL); - - /* FIXME: Add sys, ulp and int clocks here. 
*/ - - clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL", - CLK_IS_ROOT|CLK_IGNORE_UNUSED, - 32768); - clk_register_clkdev(clk, "clk32k", NULL); - clk_register_clkdev(clk, "apb_pclk", "rtc-pl031"); - - /* PRCMU clocks */ - fw_version = prcmu_get_fw_version(); - if (fw_version != NULL) { - switch (fw_version->project) { - case PRCMU_FW_PROJECT_U8500_C2: - case PRCMU_FW_PROJECT_U8520: - case PRCMU_FW_PROJECT_U8420: - sgaclk_parent = "soc0_pll"; - break; - default: - break; - } - } - - if (sgaclk_parent) - clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent, - PRCMU_SGACLK, 0); - else - clk = clk_reg_prcmu_gate("sgclk", NULL, - PRCMU_SGACLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "mali"); - - clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "UART"); - - clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "MSP02"); - - clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "MSP1"); - - clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "I2C"); - - clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "slim"); - - clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH1"); - - clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH2"); - - clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH3"); - - clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH5"); - - clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH6"); - - clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "PERIPH7"); - - clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "lcd"); - clk_register_clkdev(clk, "lcd", "mcde"); - - clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "bml"); - - clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - - clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - - clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "hdmi"); - clk_register_clkdev(clk, "hdmi", "mcde"); - - clk = clk_reg_prcmu_scalable("apeatclk", NULL, PRCMU_APEATCLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "apeat"); - - clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "apetrace"); - - clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "mcde"); - clk_register_clkdev(clk, "mcde", "mcde"); - clk_register_clkdev(clk, "dsisys", "dsilink.0"); - clk_register_clkdev(clk, "dsisys", "dsilink.1"); - clk_register_clkdev(clk, "dsisys", "dsilink.2"); - - clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, - CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "ipi2"); - - clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, - CLK_IS_ROOT); - clk_register_clkdev(clk, 
NULL, "dsialt"); - - clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "dma40.0"); - - clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "b2r2"); - clk_register_clkdev(clk, NULL, "b2r2_core"); - clk_register_clkdev(clk, NULL, "U8500-B2R2.0"); - - clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "tv"); - clk_register_clkdev(clk, "tv", "mcde"); - - clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "SSP"); - - clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "rngclk"); - - clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "uicc"); - - clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT); - clk_register_clkdev(clk, NULL, "mtu0"); - clk_register_clkdev(clk, NULL, "mtu1"); - - clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK, - 100000000, - CLK_IS_ROOT|CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdmmc"); - - clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk", - PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsihs2", "mcde"); - clk_register_clkdev(clk, "dsihs2", "dsilink.2"); - - - clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll", - PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsihs0", "mcde"); - clk_register_clkdev(clk, "dsihs0", "dsilink.0"); - - clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll", - PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsihs1", "mcde"); - clk_register_clkdev(clk, "dsihs1", "dsilink.1"); - - clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk", - PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsilp0", "dsilink.0"); - clk_register_clkdev(clk, "dsilp0", "mcde"); - - clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk", - PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsilp1", "dsilink.1"); - clk_register_clkdev(clk, "dsilp1", "mcde"); - - clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk", - PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE); - clk_register_clkdev(clk, "dsilp2", "dsilink.2"); - clk_register_clkdev(clk, "dsilp2", "mcde"); - - clk = clk_reg_prcmu_scalable_rate("armss", NULL, - PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED); - clk_register_clkdev(clk, "armss", NULL); - - clk = clk_register_fixed_factor(NULL, "smp_twd", "armss", - CLK_IGNORE_UNUSED, 1, 2); - clk_register_clkdev(clk, NULL, "smp_twd"); - - /* - * FIXME: Add special handled PRCMU clocks here: - * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl. - * 2. 
ab9540_clkout1yuv, see clkout0yuv - */ - - /* PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base, - BIT(0), 0); - clk_register_clkdev(clk, "apb_pclk", "uart0"); - - clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base, - BIT(1), 0); - clk_register_clkdev(clk, "apb_pclk", "uart1"); - - clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base, - BIT(2), 0); - clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1"); - - clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base, - BIT(3), 0); - clk_register_clkdev(clk, "apb_pclk", "msp0"); - clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0"); - - clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base, - BIT(4), 0); - clk_register_clkdev(clk, "apb_pclk", "msp1"); - clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1"); - - clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base, - BIT(5), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi0"); - - clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base, - BIT(6), 0); - clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2"); - - clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base, - BIT(7), 0); - clk_register_clkdev(clk, NULL, "spi3"); - - clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base, - BIT(8), 0); - clk_register_clkdev(clk, "apb_pclk", "slimbus0"); - - clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base, - BIT(9), 0); - clk_register_clkdev(clk, NULL, "gpio.0"); - clk_register_clkdev(clk, NULL, "gpio.1"); - clk_register_clkdev(clk, NULL, "gpioblock0"); - - clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base, - BIT(10), 0); - clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4"); - - clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base, - BIT(11), 0); - clk_register_clkdev(clk, "apb_pclk", "msp3"); - clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3"); - - clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base, - BIT(0), 0); - clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3"); - - clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base, - BIT(1), 0); - clk_register_clkdev(clk, NULL, "spi2"); - - clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base, - BIT(2), 0); - clk_register_clkdev(clk, NULL, "spi1"); - - clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base, - BIT(3), 0); - clk_register_clkdev(clk, NULL, "pwl"); - - clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base, - BIT(4), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi4"); - - clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base, - BIT(5), 0); - clk_register_clkdev(clk, "apb_pclk", "msp2"); - clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2"); - - clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base, - BIT(6), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi1"); - - clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base, - BIT(7), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi3"); - - clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base, - BIT(8), 0); - clk_register_clkdev(clk, NULL, "spi0"); - - clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base, - BIT(9), 0); - clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0"); - - clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base, - BIT(10), 0); - clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0"); - - clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base, - BIT(11), 0); - clk_register_clkdev(clk, NULL, "gpio.6"); - clk_register_clkdev(clk, NULL, "gpio.7"); - clk_register_clkdev(clk, NULL, "gpioblock1"); - - clk = 
clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base, - BIT(12), 0); - - clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, - BIT(0), 0); - clk_register_clkdev(clk, "fsmc", NULL); - clk_register_clkdev(clk, NULL, "smsc911x.0"); - - clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, - BIT(1), 0); - clk_register_clkdev(clk, "apb_pclk", "ssp0"); - - clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base, - BIT(2), 0); - clk_register_clkdev(clk, "apb_pclk", "ssp1"); - - clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base, - BIT(3), 0); - clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0"); - - clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base, - BIT(4), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi2"); - - clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base, - BIT(5), 0); - clk_register_clkdev(clk, "apb_pclk", "ske"); - clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad"); - - clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base, - BIT(6), 0); - clk_register_clkdev(clk, "apb_pclk", "uart2"); - - clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base, - BIT(7), 0); - clk_register_clkdev(clk, "apb_pclk", "sdi5"); - - clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base, - BIT(8), 0); - clk_register_clkdev(clk, NULL, "gpio.2"); - clk_register_clkdev(clk, NULL, "gpio.3"); - clk_register_clkdev(clk, NULL, "gpio.4"); - clk_register_clkdev(clk, NULL, "gpio.5"); - clk_register_clkdev(clk, NULL, "gpioblock2"); - - clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base, - BIT(0), 0); - clk_register_clkdev(clk, "usb", "musb-ux500.0"); - - clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base, - BIT(1), 0); - clk_register_clkdev(clk, NULL, "gpio.8"); - clk_register_clkdev(clk, NULL, "gpioblock3"); - - clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base, - BIT(0), 0); - clk_register_clkdev(clk, "apb_pclk", "rng"); - - clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base, - BIT(1), 0); - clk_register_clkdev(clk, NULL, "cryp0"); - clk_register_clkdev(clk, NULL, "cryp1"); - - clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base, - BIT(2), 0); - clk_register_clkdev(clk, NULL, "hash0"); - - clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base, - BIT(3), 0); - clk_register_clkdev(clk, NULL, "pka"); - - clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base, - BIT(4), 0); - clk_register_clkdev(clk, NULL, "hash1"); - - clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base, - BIT(5), 0); - clk_register_clkdev(clk, NULL, "cfgreg"); - - clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base, - BIT(6), 0); - clk_register_clkdev(clk, "apb_pclk", "mtu0"); - - clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base, - BIT(7), 0); - clk_register_clkdev(clk, "apb_pclk", "mtu1"); - - /* PRCC K-clocks - * - * FIXME: Some drivers requires PERPIH[n| to be automatically enabled - * by enabling just the K-clock, even if it is not a valid parent to - * the K-clock. Until drivers get fixed we might need some kind of - * "parent muxed join". 
- */ - - /* Periph1 */ - clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk", - clkrst1_base, BIT(0), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "uart0"); - - clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk", - clkrst1_base, BIT(1), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "uart1"); - - clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", - clkrst1_base, BIT(2), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "nmk-i2c.1"); - - clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", - clkrst1_base, BIT(3), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "msp0"); - clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0"); - - clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", - clkrst1_base, BIT(4), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "msp1"); - clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1"); - - clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", - clkrst1_base, BIT(5), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi0"); - - clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", - clkrst1_base, BIT(6), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "nmk-i2c.2"); - - clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", - clkrst1_base, BIT(8), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "slimbus0"); - - clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", - clkrst1_base, BIT(9), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "nmk-i2c.4"); - - clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", - clkrst1_base, BIT(10), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "msp3"); - clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3"); - - /* Periph2 */ - clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", - clkrst2_base, BIT(0), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "nmk-i2c.3"); - - clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", - clkrst2_base, BIT(2), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi4"); - - clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", - clkrst2_base, BIT(3), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "msp2"); - clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2"); - - clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", - clkrst2_base, BIT(4), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi1"); - - clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk", - clkrst2_base, BIT(5), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi3"); - - /* Note that rate is received from parent. 
*/ - clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk", - clkrst2_base, BIT(6), - CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); - clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk", - clkrst2_base, BIT(7), - CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); - - /* Periph3 */ - clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", - clkrst3_base, BIT(1), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "ssp0"); - - clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", - clkrst3_base, BIT(2), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "ssp1"); - - clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", - clkrst3_base, BIT(3), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "nmk-i2c.0"); - - clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", - clkrst3_base, BIT(4), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi2"); - - clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k", - clkrst3_base, BIT(5), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "ske"); - clk_register_clkdev(clk, NULL, "nmk-ske-keypad"); - - clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk", - clkrst3_base, BIT(6), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "uart2"); - - clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk", - clkrst3_base, BIT(7), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "sdi5"); - - /* Periph6 */ - clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk", - clkrst6_base, BIT(0), CLK_SET_RATE_GATE); - clk_register_clkdev(clk, NULL, "rng"); -} diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index e319ef912..271c09644 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -8,8 +8,7 @@ */ #include -#include -#include +#include #include #include #include @@ -54,14 +53,25 @@ static const struct of_device_id u8500_clk_of_match[] = { { }, }; -void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, - u32 clkrst5_base, u32 clkrst6_base) +/* CLKRST4 is missing making it hard to index things */ +enum clkrst_index { + CLKRST1_INDEX = 0, + CLKRST2_INDEX, + CLKRST3_INDEX, + CLKRST5_INDEX, + CLKRST6_INDEX, + CLKRST_MAX, +}; + +void u8500_clk_init(void) { struct prcmu_fw_version *fw_version; struct device_node *np = NULL; struct device_node *child = NULL; const char *sgaclk_parent = NULL; struct clk *clk, *rtc_clk, *twd_clk; + u32 bases[CLKRST_MAX]; + int i; if (of_have_populated_dt()) np = of_find_matching_node(NULL, u8500_clk_of_match); @@ -69,6 +79,15 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, pr_err("Either DT or U8500 Clock node not found\n"); return; } + for (i = 0; i < ARRAY_SIZE(bases); i++) { + struct resource r; + + if (of_address_to_resource(np, i, &r)) + /* Not much choice but to continue */ + pr_err("failed to get CLKRST %d base address\n", + i + 1); + bases[i] = r.start; + } /* Clock sources */ clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0, @@ -246,179 +265,179 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, */ /* PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX], BIT(0), 0); PRCC_PCLK_STORE(clk, 1, 0); - clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX], BIT(1), 0); PRCC_PCLK_STORE(clk, 1, 1); - clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX], BIT(2), 0); 
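/* PRCC_PCLK_STORE() stashes the clock for later lookup by the DT clock provider */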
PRCC_PCLK_STORE(clk, 1, 2); - clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX], BIT(3), 0); PRCC_PCLK_STORE(clk, 1, 3); - clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX], BIT(4), 0); PRCC_PCLK_STORE(clk, 1, 4); - clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX], BIT(5), 0); PRCC_PCLK_STORE(clk, 1, 5); - clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX], BIT(6), 0); PRCC_PCLK_STORE(clk, 1, 6); - clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX], BIT(7), 0); PRCC_PCLK_STORE(clk, 1, 7); - clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX], BIT(8), 0); PRCC_PCLK_STORE(clk, 1, 8); - clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX], BIT(9), 0); PRCC_PCLK_STORE(clk, 1, 9); - clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX], BIT(10), 0); PRCC_PCLK_STORE(clk, 1, 10); - clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX], BIT(11), 0); PRCC_PCLK_STORE(clk, 1, 11); - clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX], BIT(0), 0); PRCC_PCLK_STORE(clk, 2, 0); - clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX], BIT(1), 0); PRCC_PCLK_STORE(clk, 2, 1); - clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX], BIT(2), 0); PRCC_PCLK_STORE(clk, 2, 2); - clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX], BIT(3), 0); PRCC_PCLK_STORE(clk, 2, 3); - clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX], BIT(4), 0); PRCC_PCLK_STORE(clk, 2, 4); - clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX], BIT(5), 0); PRCC_PCLK_STORE(clk, 2, 5); - clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX], BIT(6), 0); PRCC_PCLK_STORE(clk, 2, 6); - clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX], BIT(7), 0); PRCC_PCLK_STORE(clk, 2, 7); - clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX], BIT(8), 0); PRCC_PCLK_STORE(clk, 2, 8); - clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX], BIT(9), 0); PRCC_PCLK_STORE(clk, 2, 9); - clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX], BIT(10), 0); PRCC_PCLK_STORE(clk, 2, 10); - clk = 
clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX], BIT(11), 0); PRCC_PCLK_STORE(clk, 2, 11); - clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX], BIT(12), 0); PRCC_PCLK_STORE(clk, 2, 12); - clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX], BIT(0), 0); PRCC_PCLK_STORE(clk, 3, 0); - clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX], BIT(1), 0); PRCC_PCLK_STORE(clk, 3, 1); - clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX], BIT(2), 0); PRCC_PCLK_STORE(clk, 3, 2); - clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX], BIT(3), 0); PRCC_PCLK_STORE(clk, 3, 3); - clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX], BIT(4), 0); PRCC_PCLK_STORE(clk, 3, 4); - clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX], BIT(5), 0); PRCC_PCLK_STORE(clk, 3, 5); - clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX], BIT(6), 0); PRCC_PCLK_STORE(clk, 3, 6); - clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX], BIT(7), 0); PRCC_PCLK_STORE(clk, 3, 7); - clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX], BIT(8), 0); PRCC_PCLK_STORE(clk, 3, 8); - clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base, + clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX], BIT(0), 0); PRCC_PCLK_STORE(clk, 5, 0); - clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base, + clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX], BIT(1), 0); PRCC_PCLK_STORE(clk, 5, 1); - clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX], BIT(0), 0); PRCC_PCLK_STORE(clk, 6, 0); - clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX], BIT(1), 0); PRCC_PCLK_STORE(clk, 6, 1); - clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX], BIT(2), 0); PRCC_PCLK_STORE(clk, 6, 2); - clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX], BIT(3), 0); PRCC_PCLK_STORE(clk, 6, 3); - clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX], BIT(4), 0); PRCC_PCLK_STORE(clk, 6, 4); - clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX], BIT(5), 0); PRCC_PCLK_STORE(clk, 6, 5); - clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX], BIT(6), 0); PRCC_PCLK_STORE(clk, 6, 6); - clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base, + clk = 
clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX], BIT(7), 0); PRCC_PCLK_STORE(clk, 6, 7); @@ -432,109 +451,109 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, /* Periph1 */ clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk", - clkrst1_base, BIT(0), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 0); clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk", - clkrst1_base, BIT(1), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 1); clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", - clkrst1_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 2); clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", - clkrst1_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 3); clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", - clkrst1_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 4); clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", - clkrst1_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 5); clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", - clkrst1_base, BIT(6), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 6); clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", - clkrst1_base, BIT(8), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 8); clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", - clkrst1_base, BIT(9), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 9); clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", - clkrst1_base, BIT(10), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 1, 10); /* Periph2 */ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", - clkrst2_base, BIT(0), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 2, 0); clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", - clkrst2_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 2, 2); clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", - clkrst2_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 2, 3); clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", - clkrst2_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 2, 4); clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk", - clkrst2_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 2, 5); /* Note that rate is received from parent. 
*/ clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk", - clkrst2_base, BIT(6), + bases[CLKRST2_INDEX], BIT(6), CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); PRCC_KCLK_STORE(clk, 2, 6); clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk", - clkrst2_base, BIT(7), + bases[CLKRST2_INDEX], BIT(7), CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); PRCC_KCLK_STORE(clk, 2, 7); /* Periph3 */ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", - clkrst3_base, BIT(1), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 1); clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", - clkrst3_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 2); clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", - clkrst3_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 3); clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", - clkrst3_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 4); clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k", - clkrst3_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 5); clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk", - clkrst3_base, BIT(6), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 6); clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk", - clkrst3_base, BIT(7), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 3, 7); /* Periph6 */ clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk", - clkrst6_base, BIT(0), CLK_SET_RATE_GATE); + bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE); PRCC_KCLK_STORE(clk, 6, 0); for_each_child_of_node(np, child) { diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c index 20c8add90..d7bcb7a86 100644 --- a/drivers/clk/ux500/u8540_clk.c +++ b/drivers/clk/ux500/u8540_clk.c @@ -7,17 +7,51 @@ * License terms: GNU General Public License (GPL) version 2 */ -#include +#include +#include #include #include #include #include #include "clk.h" -void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, - u32 clkrst5_base, u32 clkrst6_base) +static const struct of_device_id u8540_clk_of_match[] = { + { .compatible = "stericsson,u8540-clks", }, + { } +}; + +/* CLKRST4 is missing making it hard to index things */ +enum clkrst_index { + CLKRST1_INDEX = 0, + CLKRST2_INDEX, + CLKRST3_INDEX, + CLKRST5_INDEX, + CLKRST6_INDEX, + CLKRST_MAX, +}; + +void u8540_clk_init(void) { struct clk *clk; + struct device_node *np = NULL; + u32 bases[CLKRST_MAX]; + int i; + + if (of_have_populated_dt()) + np = of_find_matching_node(NULL, u8540_clk_of_match); + if (!np) { + pr_err("Either DT or U8540 Clock node not found\n"); + return; + } + for (i = 0; i < ARRAY_SIZE(bases); i++) { + struct resource r; + + if (of_address_to_resource(np, i, &r)) + /* Not much choice but to continue */ + pr_err("failed to get CLKRST %d base address\n", + i + 1); + bases[i] = r.start; + } /* Clock sources. 
*/ /* Fixed ClockGen */ @@ -219,151 +253,151 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, /* PRCC P-clocks */ /* Peripheral 1 : PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX], BIT(0), 0); clk_register_clkdev(clk, "apb_pclk", "uart0"); - clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX], BIT(1), 0); clk_register_clkdev(clk, "apb_pclk", "uart1"); - clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX], BIT(2), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1"); - clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX], BIT(3), 0); clk_register_clkdev(clk, "apb_pclk", "msp0"); clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.0"); - clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX], BIT(4), 0); clk_register_clkdev(clk, "apb_pclk", "msp1"); clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.1"); - clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX], BIT(5), 0); clk_register_clkdev(clk, "apb_pclk", "sdi0"); - clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX], BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2"); - clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX], BIT(7), 0); clk_register_clkdev(clk, NULL, "spi3"); - clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX], BIT(8), 0); clk_register_clkdev(clk, "apb_pclk", "slimbus0"); - clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX], BIT(9), 0); clk_register_clkdev(clk, NULL, "gpio.0"); clk_register_clkdev(clk, NULL, "gpio.1"); clk_register_clkdev(clk, NULL, "gpioblock0"); clk_register_clkdev(clk, "apb_pclk", "ab85xx-codec.0"); - clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX], BIT(10), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4"); - clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base, + clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX], BIT(11), 0); clk_register_clkdev(clk, "apb_pclk", "msp3"); clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.3"); /* Peripheral 2 : PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX], BIT(0), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3"); - clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX], BIT(1), 0); clk_register_clkdev(clk, NULL, "spi2"); - clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX], BIT(2), 0); clk_register_clkdev(clk, NULL, "spi1"); - clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX], 
BIT(3), 0); clk_register_clkdev(clk, NULL, "pwl"); - clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX], BIT(4), 0); clk_register_clkdev(clk, "apb_pclk", "sdi4"); - clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX], BIT(5), 0); clk_register_clkdev(clk, "apb_pclk", "msp2"); clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.2"); - clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX], BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "sdi1"); - clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX], BIT(7), 0); clk_register_clkdev(clk, "apb_pclk", "sdi3"); - clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX], BIT(8), 0); clk_register_clkdev(clk, NULL, "spi0"); - clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX], BIT(9), 0); clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0"); - clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX], BIT(10), 0); clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0"); - clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX], BIT(11), 0); clk_register_clkdev(clk, NULL, "gpio.6"); clk_register_clkdev(clk, NULL, "gpio.7"); clk_register_clkdev(clk, NULL, "gpioblock1"); - clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base, + clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX], BIT(12), 0); clk_register_clkdev(clk, "msp4-pclk", "ab85xx-codec.0"); /* Peripheral 3 : PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX], BIT(0), 0); clk_register_clkdev(clk, NULL, "fsmc"); - clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX], BIT(1), 0); clk_register_clkdev(clk, "apb_pclk", "ssp0"); - clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX], BIT(2), 0); clk_register_clkdev(clk, "apb_pclk", "ssp1"); - clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX], BIT(3), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0"); - clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX], BIT(4), 0); clk_register_clkdev(clk, "apb_pclk", "sdi2"); - clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX], BIT(5), 0); clk_register_clkdev(clk, "apb_pclk", "ske"); clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad"); - clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX], BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "uart2"); - clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX], BIT(7), 0); clk_register_clkdev(clk, 
"apb_pclk", "sdi5"); - clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX], BIT(8), 0); clk_register_clkdev(clk, NULL, "gpio.2"); clk_register_clkdev(clk, NULL, "gpio.3"); @@ -371,64 +405,64 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, clk_register_clkdev(clk, NULL, "gpio.5"); clk_register_clkdev(clk, NULL, "gpioblock2"); - clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", bases[CLKRST3_INDEX], BIT(9), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.5"); - clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", bases[CLKRST3_INDEX], BIT(10), 0); clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.6"); - clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", bases[CLKRST3_INDEX], BIT(11), 0); clk_register_clkdev(clk, "apb_pclk", "uart3"); - clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", clkrst3_base, + clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", bases[CLKRST3_INDEX], BIT(12), 0); clk_register_clkdev(clk, "apb_pclk", "uart4"); /* Peripheral 5 : PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base, + clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX], BIT(0), 0); clk_register_clkdev(clk, "usb", "musb-ux500.0"); clk_register_clkdev(clk, "usbclk", "ab-iddet.0"); - clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base, + clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX], BIT(1), 0); clk_register_clkdev(clk, NULL, "gpio.8"); clk_register_clkdev(clk, NULL, "gpioblock3"); /* Peripheral 6 : PRCC P-clocks */ - clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX], BIT(0), 0); clk_register_clkdev(clk, "apb_pclk", "rng"); - clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX], BIT(1), 0); clk_register_clkdev(clk, NULL, "cryp0"); clk_register_clkdev(clk, NULL, "cryp1"); - clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX], BIT(2), 0); clk_register_clkdev(clk, NULL, "hash0"); - clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX], BIT(3), 0); clk_register_clkdev(clk, NULL, "pka"); - clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX], BIT(4), 0); clk_register_clkdev(clk, NULL, "db8540-hash1"); - clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX], BIT(5), 0); clk_register_clkdev(clk, NULL, "cfgreg"); - clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX], BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "mtu0"); - clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base, + clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX], BIT(7), 0); clk_register_clkdev(clk, "apb_pclk", "mtu1"); @@ -442,138 +476,138 @@ void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, /* Peripheral 1 : PRCC K-clocks */ clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk", - clkrst1_base, BIT(0), 
CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "uart0"); clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk", - clkrst1_base, BIT(1), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "uart1"); clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", - clkrst1_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.1"); clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", - clkrst1_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "msp0"); clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.0"); clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", - clkrst1_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "msp1"); clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.1"); clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmchclk", - clkrst1_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi0"); clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", - clkrst1_base, BIT(6), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.2"); clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", - clkrst1_base, BIT(8), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "slimbus0"); clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", - clkrst1_base, BIT(9), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.4"); clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", - clkrst1_base, BIT(10), CLK_SET_RATE_GATE); + bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "msp3"); clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.3"); /* Peripheral 2 : PRCC K-clocks */ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", - clkrst2_base, BIT(0), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.3"); clk = clk_reg_prcc_kclk("p2_pwl_kclk", "rtc32k", - clkrst2_base, BIT(1), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(1), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "pwl"); clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmchclk", - clkrst2_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi4"); clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", - clkrst2_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "msp2"); clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.2"); clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmchclk", - clkrst2_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi1"); clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk", - clkrst2_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi3"); clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk", - clkrst2_base, BIT(6), + bases[CLKRST2_INDEX], BIT(6), CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); clk_register_clkdev(clk, "hsir_hsirxclk", "ste_hsi.0"); clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk", - clkrst2_base, BIT(7), + bases[CLKRST2_INDEX], BIT(7), CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT); 
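Each clk_register_clkdev() call in these hunks pairs the freshly registered clock with a (con_id, dev_id) lookup key for non-DT consumers. A hedged sketch of the consumer side — uart1_enable_clocks() and its device are hypothetical, but devm_clk_get() and clk_prepare_enable() are the standard clk consumer API:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer for the "uart1" entries registered above:
 * a NULL con_id matches the kernel clock (kclk) entry, "apb_pclk"
 * matches the bus clock entry. */
static int uart1_enable_clocks(struct device *dev)
{
	struct clk *kclk = devm_clk_get(dev, NULL);
	struct clk *pclk = devm_clk_get(dev, "apb_pclk");
	int ret;

	if (IS_ERR(kclk))
		return PTR_ERR(kclk);
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kclk);
	if (ret)
		clk_disable_unprepare(pclk);

	return ret;
}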
clk_register_clkdev(clk, "hsit_hsitxclk", "ste_hsi.0"); /* Should only be 9540, but might be added for 85xx as well */ clk = clk_reg_prcc_kclk("p2_msp4_kclk", "msp02clk", - clkrst2_base, BIT(9), CLK_SET_RATE_GATE); + bases[CLKRST2_INDEX], BIT(9), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "msp4"); clk_register_clkdev(clk, "msp4", "ab85xx-codec.0"); /* Peripheral 3 : PRCC K-clocks */ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", - clkrst3_base, BIT(1), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "ssp0"); clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", - clkrst3_base, BIT(2), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "ssp1"); clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", - clkrst3_base, BIT(3), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.0"); clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmchclk", - clkrst3_base, BIT(4), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi2"); clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k", - clkrst3_base, BIT(5), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "ske"); clk_register_clkdev(clk, NULL, "nmk-ske-keypad"); clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk", - clkrst3_base, BIT(6), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "uart2"); clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk", - clkrst3_base, BIT(7), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "sdi5"); clk = clk_reg_prcc_kclk("p3_i2c5_kclk", "i2cclk", - clkrst3_base, BIT(8), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(8), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.5"); clk = clk_reg_prcc_kclk("p3_i2c6_kclk", "i2cclk", - clkrst3_base, BIT(9), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(9), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "nmk-i2c.6"); clk = clk_reg_prcc_kclk("p3_uart3_kclk", "uartclk", - clkrst3_base, BIT(10), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(10), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "uart3"); clk = clk_reg_prcc_kclk("p3_uart4_kclk", "uartclk", - clkrst3_base, BIT(11), CLK_SET_RATE_GATE); + bases[CLKRST3_INDEX], BIT(11), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "uart4"); /* Peripheral 6 : PRCC K-clocks */ clk = clk_reg_prcc_kclk("p6_rng_kclk", "rngclk", - clkrst6_base, BIT(0), CLK_SET_RATE_GATE); + bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "rng"); } diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c index 44794782e..2138a4c8c 100644 --- a/drivers/clk/ux500/u9540_clk.c +++ b/drivers/clk/ux500/u9540_clk.c @@ -7,15 +7,12 @@ * License terms: GNU General Public License (GPL) version 2 */ -#include -#include #include #include #include #include "clk.h" -void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, - u32 clkrst5_base, u32 clkrst6_base) +void u9540_clk_init(void) { /* register clocks here */ } diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index bc96f103b..08c5ee976 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -13,8 +13,9 @@ * ICST clock code from the ARM tree should probably be merged into this * file. 
*/ -#include -#include +#include +#include +#include #include #include #include @@ -156,8 +157,10 @@ struct clk *icst_clk_register(struct device *dev, icst->lockreg = base + desc->lock_offset; clk = clk_register(dev, &icst->hw); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + kfree(pclone); kfree(icst); + } return clk; } diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c index 1cc1330dc..65c842a21 100644 --- a/drivers/clk/versatile/clk-impd1.c +++ b/drivers/clk/versatile/clk-impd1.c @@ -7,7 +7,6 @@ * published by the Free Software Foundation. */ #include -#include #include #include #include diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c index c8b523117..86f70997d 100644 --- a/drivers/clk/versatile/clk-realview.c +++ b/drivers/clk/versatile/clk-realview.c @@ -6,7 +6,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include #include #include #include @@ -33,13 +32,13 @@ static const struct icst_params realview_oscvco_params = { .idx2s = icst307_idx2s, }; -static const struct clk_icst_desc __initdata realview_osc0_desc = { +static const struct clk_icst_desc realview_osc0_desc __initconst = { .params = &realview_oscvco_params, .vco_offset = REALVIEW_SYS_OSC0_OFFSET, .lock_offset = REALVIEW_SYS_LOCK_OFFSET, }; -static const struct clk_icst_desc __initdata realview_osc4_desc = { +static const struct clk_icst_desc realview_osc4_desc __initconst = { .params = &realview_oscvco_params, .vco_offset = REALVIEW_SYS_OSC4_OFFSET, .lock_offset = REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c index b674ffc4f..a1cdef6b0 100644 --- a/drivers/clk/versatile/clk-sp810.c +++ b/drivers/clk/versatile/clk-sp810.c @@ -12,7 +12,8 @@ */ #include -#include +#include +#include #include #include #include @@ -32,12 +33,9 @@ struct clk_sp810_timerclken { struct clk_sp810 { struct device_node *node; - int refclk_index, timclk_index; void __iomem *base; spinlock_t lock; struct clk_sp810_timerclken timerclken[4]; - struct clk *refclk; - struct clk *timclk; }; static u8 clk_sp810_timerclken_get_parent(struct clk_hw *hw) @@ -70,55 +68,7 @@ static int clk_sp810_timerclken_set_parent(struct clk_hw *hw, u8 index) return 0; } -/* - * FIXME - setting the parent every time .prepare is invoked is inefficient. - * This is better handled by a dedicated clock tree configuration mechanism at - * init-time. 
Revisit this later when such a mechanism exists - */ -static int clk_sp810_timerclken_prepare(struct clk_hw *hw) -{ - struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw); - struct clk_sp810 *sp810 = timerclken->sp810; - struct clk *old_parent = __clk_get_parent(hw->clk); - struct clk *new_parent; - - if (!sp810->refclk) - sp810->refclk = of_clk_get(sp810->node, sp810->refclk_index); - - if (!sp810->timclk) - sp810->timclk = of_clk_get(sp810->node, sp810->timclk_index); - - if (WARN_ON(IS_ERR(sp810->refclk) || IS_ERR(sp810->timclk))) - return -ENOENT; - - /* Select fastest parent */ - if (clk_get_rate(sp810->refclk) > clk_get_rate(sp810->timclk)) - new_parent = sp810->refclk; - else - new_parent = sp810->timclk; - - /* Switch the parent if necessary */ - if (old_parent != new_parent) { - clk_prepare(new_parent); - clk_set_parent(hw->clk, new_parent); - clk_unprepare(old_parent); - } - - return 0; -} - -static void clk_sp810_timerclken_unprepare(struct clk_hw *hw) -{ - struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw); - struct clk_sp810 *sp810 = timerclken->sp810; - - clk_put(sp810->timclk); - clk_put(sp810->refclk); -} - static const struct clk_ops clk_sp810_timerclken_ops = { - .prepare = clk_sp810_timerclken_prepare, - .unprepare = clk_sp810_timerclken_unprepare, .get_parent = clk_sp810_timerclken_get_parent, .set_parent = clk_sp810_timerclken_set_parent, }; @@ -139,24 +89,18 @@ static void __init clk_sp810_of_setup(struct device_node *node) { struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL); const char *parent_names[2]; + int num = ARRAY_SIZE(parent_names); char name[12]; struct clk_init_data init; int i; + bool deprecated; if (!sp810) { pr_err("Failed to allocate memory for SP810!\n"); return; } - sp810->refclk_index = of_property_match_string(node, "clock-names", - "refclk"); - parent_names[0] = of_clk_get_parent_name(node, sp810->refclk_index); - - sp810->timclk_index = of_property_match_string(node, "clock-names", - "timclk"); - parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index); - - if (!parent_names[0] || !parent_names[1]) { + if (of_clk_parent_fill(node, parent_names, num) != num) { pr_warn("Failed to obtain parent clocks for SP810!\n"); return; } @@ -169,7 +113,9 @@ static void __init clk_sp810_of_setup(struct device_node *node) init.ops = &clk_sp810_timerclken_ops; init.flags = CLK_IS_BASIC; init.parent_names = parent_names; - init.num_parents = ARRAY_SIZE(parent_names); + init.num_parents = num; + + deprecated = !of_find_property(node, "assigned-clock-parents", NULL); for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) { snprintf(name, ARRAY_SIZE(name), "timerclken%d", i); @@ -178,6 +124,15 @@ static void __init clk_sp810_of_setup(struct device_node *node) sp810->timerclken[i].channel = i; sp810->timerclken[i].hw.init = &init; + /* + * If DT isn't setting the parent, force it to be + * the 1 MHz clock without going through the framework. + * We do this before clk_register() so that it can determine + * the parent and setup the tree properly. + */ + if (deprecated) + init.ops->set_parent(&sp810->timerclken[i].hw, 1); + sp810->timerclken[i].clk = clk_register(NULL, &sp810->timerclken[i].hw); WARN_ON(IS_ERR(sp810->timerclken[i].clk)); diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c index 7a4f8635b..a89a92756 100644 --- a/drivers/clk/versatile/clk-versatile.c +++ b/drivers/clk/versatile/clk-versatile.c @@ -8,8 +8,6 @@ * published by the Free Software Foundation. 
*/ #include -#include -#include #include #include #include @@ -35,7 +33,7 @@ static const struct icst_params cp_auxosc_params = { .idx2s = icst525_idx2s, }; -static const struct clk_icst_desc __initdata cm_auxosc_desc = { +static const struct clk_icst_desc cm_auxosc_desc __initconst = { .params = &cp_auxosc_params, .vco_offset = 0x1c, .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile index 95b707c18..74005aa32 100644 --- a/drivers/clk/zte/Makefile +++ b/drivers/clk/zte/Makefile @@ -1,2 +1,2 @@ -obj-y := clk-pll.o +obj-y := clk.o obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o diff --git a/drivers/clk/zte/clk-pll.c b/drivers/clk/zte/clk-pll.c deleted file mode 100644 index c3b221ae6..000000000 --- a/drivers/clk/zte/clk-pll.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2014 Linaro Ltd. - * Copyright (C) 2014 ZTE Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include "clk.h" - -#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw) - -#define CFG0_CFG1_OFFSET 4 -#define LOCK_FLAG BIT(30) -#define POWER_DOWN BIT(31) - -static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate) -{ - const struct zx_pll_config *config = zx_pll->lookup_table; - int i; - - for (i = 0; i < zx_pll->count; i++) { - if (config[i].rate > rate) - return i > 0 ? i - 1 : 0; - - if (config[i].rate == rate) - return i; - } - - return i - 1; -} - -static int hw_to_idx(struct clk_zx_pll *zx_pll) -{ - const struct zx_pll_config *config = zx_pll->lookup_table; - u32 hw_cfg0, hw_cfg1; - int i; - - hw_cfg0 = readl_relaxed(zx_pll->reg_base); - hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET); - - /* For matching the value in lookup table */ - hw_cfg0 &= ~LOCK_FLAG; - hw_cfg0 |= POWER_DOWN; - - for (i = 0; i < zx_pll->count; i++) { - if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1) - return i; - } - - return -EINVAL; -} - -static unsigned long zx_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - int idx; - - idx = hw_to_idx(zx_pll); - if (unlikely(idx == -EINVAL)) - return 0; - - return zx_pll->lookup_table[idx].rate; -} - -static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - int idx; - - idx = rate_to_idx(zx_pll, rate); - - return zx_pll->lookup_table[idx].rate; -} - -static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - /* Assume current cpu is not running on current PLL */ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - const struct zx_pll_config *config; - int idx; - - idx = rate_to_idx(zx_pll, rate); - config = &zx_pll->lookup_table[idx]; - - writel_relaxed(config->cfg0, zx_pll->reg_base); - writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET); - - return 0; -} - -static int zx_pll_enable(struct clk_hw *hw) -{ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - u32 reg; - - reg = readl_relaxed(zx_pll->reg_base); - writel_relaxed(reg & ~POWER_DOWN, zx_pll->reg_base); - - return readl_relaxed_poll_timeout(zx_pll->reg_base, reg, - reg & LOCK_FLAG, 0, 100); -} - -static void zx_pll_disable(struct clk_hw *hw) -{ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - u32 reg; - - reg = 
readl_relaxed(zx_pll->reg_base); - writel_relaxed(reg | POWER_DOWN, zx_pll->reg_base); -} - -static int zx_pll_is_enabled(struct clk_hw *hw) -{ - struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); - u32 reg; - - reg = readl_relaxed(zx_pll->reg_base); - - return !(reg & POWER_DOWN); -} - -static const struct clk_ops zx_pll_ops = { - .recalc_rate = zx_pll_recalc_rate, - .round_rate = zx_pll_round_rate, - .set_rate = zx_pll_set_rate, - .enable = zx_pll_enable, - .disable = zx_pll_disable, - .is_enabled = zx_pll_is_enabled, -}; - -struct clk *clk_register_zx_pll(const char *name, const char *parent_name, - unsigned long flags, void __iomem *reg_base, - const struct zx_pll_config *lookup_table, int count, spinlock_t *lock) -{ - struct clk_zx_pll *zx_pll; - struct clk *clk; - struct clk_init_data init; - - zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL); - if (!zx_pll) - return ERR_PTR(-ENOMEM); - - init.name = name; - init.ops = &zx_pll_ops; - init.flags = flags; - init.parent_names = parent_name ? &parent_name : NULL; - init.num_parents = parent_name ? 1 : 0; - - zx_pll->reg_base = reg_base; - zx_pll->lookup_table = lookup_table; - zx_pll->count = count; - zx_pll->lock = lock; - zx_pll->hw.init = &init; - - clk = clk_register(NULL, &zx_pll->hw); - if (IS_ERR(clk)) - kfree(zx_pll); - - return clk; -} diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c index 929d03359..ebd20d852 100644 --- a/drivers/clk/zte/clk-zx296702.c +++ b/drivers/clk/zte/clk-zx296702.c @@ -36,10 +36,21 @@ static struct clk_onecell_data lsp1clk_data; #define CLK_MUX1 (topcrm_base + 0x8c) #define CLK_SDMMC1 (lsp0crpm_base + 0x0c) +#define CLK_GPIO (lsp0crpm_base + 0x2c) +#define CLK_SPDIF0 (lsp0crpm_base + 0x10) +#define SPDIF0_DIV (lsp0crpm_base + 0x14) +#define CLK_I2S0 (lsp0crpm_base + 0x18) +#define I2S0_DIV (lsp0crpm_base + 0x1c) +#define CLK_I2S1 (lsp0crpm_base + 0x20) +#define I2S1_DIV (lsp0crpm_base + 0x24) +#define CLK_I2S2 (lsp0crpm_base + 0x34) +#define I2S2_DIV (lsp0crpm_base + 0x38) #define CLK_UART0 (lsp1crpm_base + 0x20) #define CLK_UART1 (lsp1crpm_base + 0x24) #define CLK_SDMMC0 (lsp1crpm_base + 0x2c) +#define CLK_SPDIF1 (lsp1crpm_base + 0x30) +#define SPDIF1_DIV (lsp1crpm_base + 0x34) static const struct zx_pll_config pll_a9_config[] = { { .rate = 700000000, .cfg0 = 0x800405d1, .cfg1 = 0x04555555 }, @@ -72,104 +83,119 @@ static const struct clk_div_table sec_wclk_divider[] = { { /* sentinel */ } }; -static const char * matrix_aclk_sel[] = { +static const char * const matrix_aclk_sel[] = { "pll_mm0_198M", "osc", "clk_148M5", "pll_lsp_104M", }; -static const char * a9_wclk_sel[] = { +static const char * const a9_wclk_sel[] = { "pll_a9", "osc", "clk_500", "clk_250", }; -static const char * a9_as1_aclk_sel[] = { +static const char * const a9_as1_aclk_sel[] = { "clk_250", "osc", "pll_mm0_396M", "pll_mac_333M", }; -static const char * a9_trace_clkin_sel[] = { +static const char * const a9_trace_clkin_sel[] = { "clk_74M25", "pll_mm1_108M", "clk_125", "clk_148M5", }; -static const char * decppu_aclk_sel[] = { +static const char * const decppu_aclk_sel[] = { "clk_250", "pll_mm0_198M", "pll_lsp_104M", "pll_audio_294M912", }; -static const char * vou_main_wclk_sel[] = { +static const char * const vou_main_wclk_sel[] = { "clk_148M5", "clk_74M25", "clk_27", "pll_mm1_54M", }; -static const char * vou_scaler_wclk_sel[] = { +static const char * const vou_scaler_wclk_sel[] = { "clk_250", "pll_mac_333M", "pll_audio_294M912", "pll_mm0_198M", }; -static const char * r2d_wclk_sel[] = { +static const char * 
const r2d_wclk_sel[] = { "pll_audio_294M912", "pll_mac_333M", "pll_a9_350M", "pll_mm0_396M", }; -static const char * ddr_wclk_sel[] = { +static const char * const ddr_wclk_sel[] = { "pll_mac_333M", "pll_ddr_266M", "pll_audio_294M912", "pll_mm0_198M", }; -static const char * nand_wclk_sel[] = { +static const char * const nand_wclk_sel[] = { "pll_lsp_104M", "osc", }; -static const char * lsp_26_wclk_sel[] = { +static const char * const lsp_26_wclk_sel[] = { "pll_lsp_26M", "osc", }; -static const char * vl0_sel[] = { +static const char * const vl0_sel[] = { "vou_main_channel_div", "vou_aux_channel_div", }; -static const char * hdmi_sel[] = { +static const char * const hdmi_sel[] = { "vou_main_channel_wclk", "vou_aux_channel_wclk", }; -static const char * sdmmc0_wclk_sel[] = { +static const char * const sdmmc0_wclk_sel[] = { "lsp1_104M_wclk", "lsp1_26M_wclk", }; -static const char * sdmmc1_wclk_sel[] = { +static const char * const sdmmc1_wclk_sel[] = { "lsp0_104M_wclk", "lsp0_26M_wclk", }; -static const char * uart_wclk_sel[] = { +static const char * const uart_wclk_sel[] = { "lsp1_104M_wclk", "lsp1_26M_wclk", }; +static const char * const spdif0_wclk_sel[] = { + "lsp0_104M_wclk", + "lsp0_26M_wclk", +}; + +static const char * const spdif1_wclk_sel[] = { + "lsp1_104M_wclk", + "lsp1_26M_wclk", +}; + +static const char * const i2s_wclk_sel[] = { + "lsp0_104M_wclk", + "lsp0_26M_wclk", +}; + static inline struct clk *zx_divtbl(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width, const struct clk_div_table *table) @@ -185,7 +211,7 @@ static inline struct clk *zx_div(const char *name, const char *parent, reg, shift, width, 0, ®_lock); } -static inline struct clk *zx_mux(const char *name, const char **parents, +static inline struct clk *zx_mux(const char *name, const char * const *parents, int num_parents, void __iomem *reg, u8 shift, u8 width) { return clk_register_mux(NULL, name, parents, num_parents, @@ -196,7 +222,7 @@ static inline struct clk *zx_gate(const char *name, const char *parent, void __iomem *reg, u8 shift) { return clk_register_gate(NULL, name, parent, CLK_IGNORE_UNUSED, - reg, shift, 0, ®_lock); + reg, shift, CLK_SET_RATE_PARENT, ®_lock); } static void __init zx296702_top_clocks_init(struct device_node *np) @@ -585,7 +611,57 @@ static void __init zx296702_lsp0_clocks_init(struct device_node *np) clk[ZX296702_SDMMC1_WCLK] = zx_gate("sdmmc1_wclk", "sdmmc1_wclk_div", CLK_SDMMC1, 1); clk[ZX296702_SDMMC1_PCLK] = - zx_gate("sdmmc1_pclk", "lsp1_apb_pclk", CLK_SDMMC1, 0); + zx_gate("sdmmc1_pclk", "lsp0_apb_pclk", CLK_SDMMC1, 0); + + clk[ZX296702_GPIO_CLK] = + zx_gate("gpio_clk", "lsp0_apb_pclk", CLK_GPIO, 0); + + /* SPDIF */ + clk[ZX296702_SPDIF0_WCLK_MUX] = + zx_mux("spdif0_wclk_mux", spdif0_wclk_sel, + ARRAY_SIZE(spdif0_wclk_sel), CLK_SPDIF0, 4, 1); + clk[ZX296702_SPDIF0_WCLK] = + zx_gate("spdif0_wclk", "spdif0_wclk_mux", CLK_SPDIF0, 1); + clk[ZX296702_SPDIF0_PCLK] = + zx_gate("spdif0_pclk", "lsp0_apb_pclk", CLK_SPDIF0, 0); + + clk[ZX296702_SPDIF0_DIV] = + clk_register_zx_audio("spdif0_div", "spdif0_wclk", 0, + SPDIF0_DIV); + + /* I2S */ + clk[ZX296702_I2S0_WCLK_MUX] = + zx_mux("i2s0_wclk_mux", i2s_wclk_sel, + ARRAY_SIZE(i2s_wclk_sel), CLK_I2S0, 4, 1); + clk[ZX296702_I2S0_WCLK] = + zx_gate("i2s0_wclk", "i2s0_wclk_mux", CLK_I2S0, 1); + clk[ZX296702_I2S0_PCLK] = + zx_gate("i2s0_pclk", "lsp0_apb_pclk", CLK_I2S0, 0); + + clk[ZX296702_I2S0_DIV] = + clk_register_zx_audio("i2s0_div", "i2s0_wclk", 0, I2S0_DIV); + + clk[ZX296702_I2S1_WCLK_MUX] = + zx_mux("i2s1_wclk_mux", 
i2s_wclk_sel, + ARRAY_SIZE(i2s_wclk_sel), CLK_I2S1, 4, 1); + clk[ZX296702_I2S1_WCLK] = + zx_gate("i2s1_wclk", "i2s1_wclk_mux", CLK_I2S1, 1); + clk[ZX296702_I2S1_PCLK] = + zx_gate("i2s1_pclk", "lsp0_apb_pclk", CLK_I2S1, 0); + + clk[ZX296702_I2S1_DIV] = + clk_register_zx_audio("i2s1_div", "i2s1_wclk", 0, I2S1_DIV); + + clk[ZX296702_I2S2_WCLK_MUX] = + zx_mux("i2s2_wclk_mux", i2s_wclk_sel, + ARRAY_SIZE(i2s_wclk_sel), CLK_I2S2, 4, 1); + clk[ZX296702_I2S2_WCLK] = + zx_gate("i2s2_wclk", "i2s2_wclk_mux", CLK_I2S2, 1); + clk[ZX296702_I2S2_PCLK] = + zx_gate("i2s2_pclk", "lsp0_apb_pclk", CLK_I2S2, 0); + + clk[ZX296702_I2S2_DIV] = + clk_register_zx_audio("i2s2_div", "i2s2_wclk", 0, I2S2_DIV); for (i = 0; i < ARRAY_SIZE(lsp0clk); i++) { if (IS_ERR(clk[i])) { @@ -641,6 +717,18 @@ static void __init zx296702_lsp1_clocks_init(struct device_node *np) clk[ZX296702_SDMMC0_PCLK] = zx_gate("sdmmc0_pclk", "lsp1_apb_pclk", CLK_SDMMC0, 0); + clk[ZX296702_SPDIF1_WCLK_MUX] = + zx_mux("spdif1_wclk_mux", spdif1_wclk_sel, + ARRAY_SIZE(spdif1_wclk_sel), CLK_SPDIF1, 4, 1); + clk[ZX296702_SPDIF1_WCLK] = + zx_gate("spdif1_wclk", "spdif1_wclk_mux", CLK_SPDIF1, 1); + clk[ZX296702_SPDIF1_PCLK] = + zx_gate("spdif1_pclk", "lsp1_apb_pclk", CLK_SPDIF1, 0); + + clk[ZX296702_SPDIF1_DIV] = + clk_register_zx_audio("spdif1_div", "spdif1_wclk", 0, + SPDIF1_DIV); + for (i = 0; i < ARRAY_SIZE(lsp1clk); i++) { if (IS_ERR(clk[i])) { pr_err("zx296702 clk %d: register failed with %ld\n", diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c new file mode 100644 index 000000000..7c73c538c --- /dev/null +++ b/drivers/clk/zte/clk.c @@ -0,0 +1,309 @@ +/* + * Copyright 2014 Linaro Ltd. + * Copyright (C) 2014 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "clk.h" + +#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw) +#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw) + +#define CFG0_CFG1_OFFSET 4 +#define LOCK_FLAG BIT(30) +#define POWER_DOWN BIT(31) + +static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate) +{ + const struct zx_pll_config *config = zx_pll->lookup_table; + int i; + + for (i = 0; i < zx_pll->count; i++) { + if (config[i].rate > rate) + return i > 0 ? 
i - 1 : 0; + + if (config[i].rate == rate) + return i; + } + + return i - 1; +} + +static int hw_to_idx(struct clk_zx_pll *zx_pll) +{ + const struct zx_pll_config *config = zx_pll->lookup_table; + u32 hw_cfg0, hw_cfg1; + int i; + + hw_cfg0 = readl_relaxed(zx_pll->reg_base); + hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET); + + /* For matching the value in lookup table */ + hw_cfg0 &= ~LOCK_FLAG; + hw_cfg0 |= POWER_DOWN; + + for (i = 0; i < zx_pll->count; i++) { + if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1) + return i; + } + + return -EINVAL; +} + +static unsigned long zx_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + int idx; + + idx = hw_to_idx(zx_pll); + if (unlikely(idx == -EINVAL)) + return 0; + + return zx_pll->lookup_table[idx].rate; +} + +static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + int idx; + + idx = rate_to_idx(zx_pll, rate); + + return zx_pll->lookup_table[idx].rate; +} + +static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + /* Assume current cpu is not running on current PLL */ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + const struct zx_pll_config *config; + int idx; + + idx = rate_to_idx(zx_pll, rate); + config = &zx_pll->lookup_table[idx]; + + writel_relaxed(config->cfg0, zx_pll->reg_base); + writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET); + + return 0; +} + +static int zx_pll_enable(struct clk_hw *hw) +{ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + u32 reg; + + reg = readl_relaxed(zx_pll->reg_base); + writel_relaxed(reg & ~POWER_DOWN, zx_pll->reg_base); + + return readl_relaxed_poll_timeout(zx_pll->reg_base, reg, + reg & LOCK_FLAG, 0, 100); +} + +static void zx_pll_disable(struct clk_hw *hw) +{ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + u32 reg; + + reg = readl_relaxed(zx_pll->reg_base); + writel_relaxed(reg | POWER_DOWN, zx_pll->reg_base); +} + +static int zx_pll_is_enabled(struct clk_hw *hw) +{ + struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw); + u32 reg; + + reg = readl_relaxed(zx_pll->reg_base); + + return !(reg & POWER_DOWN); +} + +static const struct clk_ops zx_pll_ops = { + .recalc_rate = zx_pll_recalc_rate, + .round_rate = zx_pll_round_rate, + .set_rate = zx_pll_set_rate, + .enable = zx_pll_enable, + .disable = zx_pll_disable, + .is_enabled = zx_pll_is_enabled, +}; + +struct clk *clk_register_zx_pll(const char *name, const char *parent_name, + unsigned long flags, void __iomem *reg_base, + const struct zx_pll_config *lookup_table, + int count, spinlock_t *lock) +{ + struct clk_zx_pll *zx_pll; + struct clk *clk; + struct clk_init_data init; + + zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL); + if (!zx_pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &zx_pll_ops; + init.flags = flags; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 
1 : 0; + + zx_pll->reg_base = reg_base; + zx_pll->lookup_table = lookup_table; + zx_pll->count = count; + zx_pll->lock = lock; + zx_pll->hw.init = &init; + + clk = clk_register(NULL, &zx_pll->hw); + if (IS_ERR(clk)) + kfree(zx_pll); + + return clk; +} + +#define BPAR 1000000 +static u32 calc_reg(u32 parent_rate, u32 rate) +{ + u32 sel, integ, fra_div, tmp; + u64 tmp64 = (u64)parent_rate * BPAR; + + do_div(tmp64, rate); + integ = (u32)tmp64 / BPAR; + integ = integ >> 1; + + tmp = (u32)tmp64 % BPAR; + sel = tmp / BPAR; + + tmp = tmp % BPAR; + fra_div = tmp * 0xff / BPAR; + tmp = (sel << 24) | (integ << 16) | (0xff << 8) | fra_div; + + /* Set I2S integer divider as 1. This bit is reserved for SPDIF + * and do no harm. + */ + tmp |= BIT(28); + return tmp; +} + +static u32 calc_rate(u32 reg, u32 parent_rate) +{ + u32 sel, integ, fra_div, tmp; + u64 tmp64 = (u64)parent_rate * BPAR; + + tmp = reg; + sel = (tmp >> 24) & BIT(0); + integ = (tmp >> 16) & 0xff; + fra_div = tmp & 0xff; + + tmp = fra_div * BPAR; + tmp = tmp / 0xff; + tmp += sel * BPAR; + tmp += 2 * integ * BPAR; + do_div(tmp64, tmp); + + return (u32)tmp64; +} + +static unsigned long zx_audio_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw); + u32 reg; + + reg = readl_relaxed(zx_audio->reg_base); + return calc_rate(reg, parent_rate); +} + +static long zx_audio_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + u32 reg; + + if (rate * 2 > *prate) + return -EINVAL; + + reg = calc_reg(*prate, rate); + return calc_rate(reg, *prate); +} + +static int zx_audio_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw); + u32 reg; + + reg = calc_reg(parent_rate, rate); + writel_relaxed(reg, zx_audio->reg_base); + + return 0; +} + +#define ZX_AUDIO_EN BIT(25) +static int zx_audio_enable(struct clk_hw *hw) +{ + struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw); + u32 reg; + + reg = readl_relaxed(zx_audio->reg_base); + writel_relaxed(reg & ~ZX_AUDIO_EN, zx_audio->reg_base); + return 0; +} + +static void zx_audio_disable(struct clk_hw *hw) +{ + struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw); + u32 reg; + + reg = readl_relaxed(zx_audio->reg_base); + writel_relaxed(reg | ZX_AUDIO_EN, zx_audio->reg_base); +} + +static const struct clk_ops zx_audio_ops = { + .recalc_rate = zx_audio_recalc_rate, + .round_rate = zx_audio_round_rate, + .set_rate = zx_audio_set_rate, + .enable = zx_audio_enable, + .disable = zx_audio_disable, +}; + +struct clk *clk_register_zx_audio(const char *name, + const char * const parent_name, + unsigned long flags, + void __iomem *reg_base) +{ + struct clk_zx_audio *zx_audio; + struct clk *clk; + struct clk_init_data init; + + zx_audio = kzalloc(sizeof(*zx_audio), GFP_KERNEL); + if (!zx_audio) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &zx_audio_ops; + init.flags = flags; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 
1 : 0; + + zx_audio->reg_base = reg_base; + zx_audio->hw.init = &init; + + clk = clk_register(NULL, &zx_audio->hw); + if (IS_ERR(clk)) + kfree(zx_audio); + + return clk; +} diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h index 0914a82d0..65ae08b81 100644 --- a/drivers/clk/zte/clk.h +++ b/drivers/clk/zte/clk.h @@ -29,4 +29,13 @@ struct clk_zx_pll { struct clk *clk_register_zx_pll(const char *name, const char *parent_name, unsigned long flags, void __iomem *reg_base, const struct zx_pll_config *lookup_table, int count, spinlock_t *lock); + +struct clk_zx_audio { + struct clk_hw hw; + void __iomem *reg_base; +}; + +struct clk *clk_register_zx_audio(const char *name, + const char * const parent_name, + unsigned long flags, void __iomem *reg_base); #endif diff --git a/drivers/clk/zynq/Makefile b/drivers/clk/zynq/Makefile index 156d923f4..0afc2e7cc 100644 --- a/drivers/clk/zynq/Makefile +++ b/drivers/clk/zynq/Makefile @@ -1,3 +1,3 @@ # Zynq clock specific Makefile -obj-$(CONFIG_ARCH_ZYNQ) += clkc.o pll.o +obj-y += clkc.o pll.o diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index de614384b..38a65c3e6 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 4e57730e0..a7726db13 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -111,6 +111,10 @@ config CLKSRC_LPC32XX select CLKSRC_MMIO select CLKSRC_OF +config CLKSRC_PISTACHIO + bool + select CLKSRC_OF + config CLKSRC_STM32 bool "Clocksource for STM32 SoCs" if !ARCH_STM32 depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST) @@ -277,7 +281,7 @@ config CLKSRC_MIPS_GIC config CLKSRC_PXA def_bool y if ARCH_PXA || ARCH_SA1100 - select CLKSRC_OF if USE_OF + select CLKSRC_OF if OF help This enables OST0 support available on PXA and SA-11x0 platforms. @@ -293,4 +297,12 @@ config CLKSRC_IMX_GPT depends on ARM && CLKDEV_LOOKUP select CLKSRC_MMIO +config CLKSRC_ST_LPC + bool + depends on ARCH_STI + select CLKSRC_OF if OF + help + Enable this option to use the Low Power controller timer + as clocksource. 
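Reading calc_rate() in the new zte/clk.c above, the audio divider encodes rate = parent / (2 * integ + sel + fra / 255), with sel in bit 24, the integer divider in bits 16..23, and the fraction in bits 0..7. A stand-alone decode sketch, assuming exactly that field layout (inferred from the hunk, so treat it as illustrative):

#include <stdint.h>
#include <stdio.h>

/* Decode a zx296702-style audio-divider register value into a rate,
 * mirroring calc_rate(): rate = parent / (2*integ + sel + fra/255).
 * Scaled by 255 throughout to stay in integer arithmetic. */
static uint32_t audio_div_rate(uint32_t reg, uint32_t parent)
{
	uint32_t sel = (reg >> 24) & 1;
	uint32_t integ = (reg >> 16) & 0xff;
	uint32_t fra = reg & 0xff;
	uint64_t div255 = (uint64_t)(2 * integ + sel) * 255 + fra;

	return (uint32_t)(((uint64_t)parent * 255) / div255);
}

int main(void)
{
	/* integ = 2, sel = 0, fra = 0: divide a 104 MHz parent by 4 */
	printf("%u\n", audio_div_rate(2 << 16, 104000000));
	return 0;
}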
+ endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index f22835496..5c00863c3 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o obj-$(CONFIG_MTK_TIMER) += mtk_timer.o +obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o @@ -60,3 +61,4 @@ obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o obj-$(CONFIG_H8300) += h8300_timer8.o obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o obj-$(CONFIG_H8300_TPU) += h8300_tpu.o +obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 0aa135ddb..d6e3e4939 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -181,44 +181,36 @@ static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); } -static __always_inline void timer_set_mode(const int access, int mode, - struct clock_event_device *clk) +static __always_inline int timer_shutdown(const int access, + struct clock_event_device *clk) { unsigned long ctrl; - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); - ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); - break; - default: - break; - } + + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); + ctrl &= ~ARCH_TIMER_CTRL_ENABLE; + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); + + return 0; } -static void arch_timer_set_mode_virt(enum clock_event_mode mode, - struct clock_event_device *clk) +static int arch_timer_shutdown_virt(struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); + return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk); } -static void arch_timer_set_mode_phys(enum clock_event_mode mode, - struct clock_event_device *clk) +static int arch_timer_shutdown_phys(struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); + return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk); } -static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, - struct clock_event_device *clk) +static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); + return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk); } -static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, - struct clock_event_device *clk) +static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); + return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk); } static __always_inline void set_next_event(const int access, unsigned long evt, @@ -273,11 +265,11 @@ static void __arch_timer_setup(unsigned type, clk->cpumask = cpumask_of(smp_processor_id()); if (arch_timer_use_virtual) { clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; + clk->set_state_shutdown = arch_timer_shutdown_virt; clk->set_next_event = arch_timer_set_next_event_virt; } else { clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; + clk->set_state_shutdown = arch_timer_shutdown_phys; clk->set_next_event = 
arch_timer_set_next_event_phys; } } else { @@ -286,17 +278,17 @@ static void __arch_timer_setup(unsigned type, clk->rating = 400; clk->cpumask = cpu_all_mask; if (arch_timer_mem_use_virtual) { - clk->set_mode = arch_timer_set_mode_virt_mem; + clk->set_state_shutdown = arch_timer_shutdown_virt_mem; clk->set_next_event = arch_timer_set_next_event_virt_mem; } else { - clk->set_mode = arch_timer_set_mode_phys_mem; + clk->set_state_shutdown = arch_timer_shutdown_phys_mem; clk->set_next_event = arch_timer_set_next_event_phys_mem; } } - clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); + clk->set_state_shutdown(clk); clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); } @@ -506,7 +498,7 @@ static void arch_timer_stop(struct clock_event_device *clk) disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); } - clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); + clk->set_state_shutdown(clk); } static int arch_timer_cpu_notify(struct notifier_block *self, diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index e6833771a..a2cb6fae9 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -60,7 +60,7 @@ static struct clock_event_device __percpu *gt_evt; * different to the 32-bit upper value read previously, go back to step 2. * Otherwise the 64-bit timer counter value is correct. */ -static u64 gt_counter_read(void) +static u64 notrace _gt_counter_read(void) { u64 counter; u32 lower; @@ -79,6 +79,11 @@ static u64 gt_counter_read(void) return counter; } +static u64 gt_counter_read(void) +{ + return _gt_counter_read(); +} + /** * To ensure that updates to comparator value register do not set the * Interrupt Status Register proceed as follows: @@ -107,26 +112,21 @@ static void gt_compare_set(unsigned long delta, int periodic) writel(ctrl, gt_base + GT_CONTROL); } -static void gt_clockevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int gt_clockevent_shutdown(struct clock_event_device *evt) { unsigned long ctrl; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1); - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = readl(gt_base + GT_CONTROL); - ctrl &= ~(GT_CONTROL_COMP_ENABLE | - GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC); - writel(ctrl, gt_base + GT_CONTROL); - break; - default: - break; - } + ctrl = readl(gt_base + GT_CONTROL); + ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE | + GT_CONTROL_AUTO_INC); + writel(ctrl, gt_base + GT_CONTROL); + return 0; +} + +static int gt_clockevent_set_periodic(struct clock_event_device *evt) +{ + gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1); + return 0; } static int gt_clockevent_set_next_event(unsigned long evt, @@ -155,7 +155,7 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) * the Global Timer flag _after_ having incremented * the Comparator register value to a higher value. 
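/*
 * Why the notrace split in arm_global_timer above: sched_clock() sits on
 * the function tracer's own path, so the counter read it calls must be
 * notrace or ftrace would recurse, while a plain wrapper stays traceable
 * for the clocksource. A minimal sketch of the pattern; the example_*
 * names are hypothetical, sched_clock_register() is the real
 * <linux/sched_clock.h> API.
 */
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

static u64 notrace example_counter_read(void)
{
	return 0;	/* read the 64-bit hardware counter here */
}

static cycle_t example_cs_read(struct clocksource *cs)
{
	/* ordinary, traceable path used by the clocksource */
	return example_counter_read();
}

static void __init example_sched_clock_init(unsigned long rate)
{
	/* the notrace reader feeds sched_clock() directly */
	sched_clock_register(example_counter_read, 64, rate);
}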
*/ - if (evt->mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(evt)) gt_compare_set(ULONG_MAX, 0); writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS); @@ -171,7 +171,9 @@ static int gt_clockevents_init(struct clock_event_device *clk) clk->name = "arm_global_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERCPU; - clk->set_mode = gt_clockevent_set_mode; + clk->set_state_shutdown = gt_clockevent_shutdown; + clk->set_state_periodic = gt_clockevent_set_periodic; + clk->set_state_oneshot = gt_clockevent_shutdown; clk->set_next_event = gt_clockevent_set_next_event; clk->cpumask = cpumask_of(cpu); clk->rating = 300; @@ -184,7 +186,7 @@ static int gt_clockevents_init(struct clock_event_device *clk) static void gt_clockevents_stop(struct clock_event_device *clk) { - gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk); + gt_clockevent_shutdown(clk); disable_percpu_irq(clk->irq); } @@ -204,7 +206,7 @@ static struct clocksource gt_clocksource = { #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK static u64 notrace gt_sched_clock_read(void) { - return gt_counter_read(); + return _gt_counter_read(); } #endif diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c index 4c2ba5989..217438d39 100644 --- a/drivers/clocksource/asm9260_timer.c +++ b/drivers/clocksource/asm9260_timer.c @@ -120,38 +120,52 @@ static int asm9260_timer_set_next_event(unsigned long delta, return 0; } -static void asm9260_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static inline void __asm9260_timer_shutdown(struct clock_event_device *evt) { /* stop timer0 */ writel_relaxed(BM_C0_EN, priv.base + HW_TCR + CLR_REG); +} + +static int asm9260_timer_shutdown(struct clock_event_device *evt) +{ + __asm9260_timer_shutdown(evt); + return 0; +} + +static int asm9260_timer_set_oneshot(struct clock_event_device *evt) +{ + __asm9260_timer_shutdown(evt); + + /* enable reset and stop on match */ + writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0), + priv.base + HW_MCR + SET_REG); + return 0; +} + +static int asm9260_timer_set_periodic(struct clock_event_device *evt) +{ + __asm9260_timer_shutdown(evt); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* disable reset and stop on match */ - writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0), - priv.base + HW_MCR + CLR_REG); - /* configure match count for TC0 */ - writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0); - /* enable TC0 */ - writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* enable reset and stop on match */ - writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0), - priv.base + HW_MCR + SET_REG); - break; - default: - break; - } + /* disable reset and stop on match */ + writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0), + priv.base + HW_MCR + CLR_REG); + /* configure match count for TC0 */ + writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0); + /* enable TC0 */ + writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG); + return 0; } static struct clock_event_device event_dev = { - .name = DRIVER_NAME, - .rating = 200, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = asm9260_timer_set_next_event, - .set_mode = asm9260_timer_set_mode, + .name = DRIVER_NAME, + .rating = 200, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = asm9260_timer_set_next_event, + .set_state_shutdown = asm9260_timer_shutdown, + .set_state_periodic = 
asm9260_timer_set_periodic, + .set_state_oneshot = asm9260_timer_set_oneshot, + .tick_resume = asm9260_timer_shutdown, }; static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id) diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 26ed331b1..6f2822928 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -54,21 +54,6 @@ static u64 notrace bcm2835_sched_read(void) return readl_relaxed(system_clock); } -static void bcm2835_time_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt_dev) -{ - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - default: - WARN(1, "%s: unhandled event mode %d\n", __func__, mode); - break; - } -} - static int bcm2835_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev) { @@ -129,7 +114,6 @@ static void __init bcm2835_timer_init(struct device_node *node) timer->evt.name = node->name; timer->evt.rating = 300; timer->evt.features = CLOCK_EVT_FEAT_ONESHOT; - timer->evt.set_mode = bcm2835_time_set_mode; timer->evt.set_next_event = bcm2835_time_set_next_event; timer->evt.cpumask = cpumask_of(0); timer->act.name = node->name; diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index f1e33d08d..e717e87df 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -127,25 +127,18 @@ static int kona_timer_set_next_event(unsigned long clc, return 0; } -static void kona_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *unused) +static int kona_timer_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - /* by default mode is one shot don't do any thing */ - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - kona_timer_disable_and_clear(timers.tmr_regs); - } + kona_timer_disable_and_clear(timers.tmr_regs); + return 0; } static struct clock_event_device kona_clockevent_timer = { .name = "timer 1", .features = CLOCK_EVT_FEAT_ONESHOT, .set_next_event = kona_timer_set_next_event, - .set_mode = kona_timer_set_mode + .set_state_shutdown = kona_timer_shutdown, + .tick_resume = kona_timer_shutdown, }; static void __init kona_timer_clockevents_init(void) diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 510c8a1d3..9be6018bd 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -16,7 +16,6 @@ */ #include -#include #include #include #include @@ -191,40 +190,42 @@ static int ttc_set_next_event(unsigned long cycles, } /** - * ttc_set_mode - Sets the mode of timer + * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer * - * @mode: Mode to be set * @evt: Address of clock event instance **/ -static void ttc_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int ttc_shutdown(struct clock_event_device *evt) { struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); struct ttc_timer *timer = &ttce->ttc; u32 ctrl_reg; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq, - PRESCALE * HZ)); - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl_reg = readl_relaxed(timer->base_addr + - TTC_CNT_CNTRL_OFFSET); - ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; - 
writel_relaxed(ctrl_reg, - timer->base_addr + TTC_CNT_CNTRL_OFFSET); - break; - case CLOCK_EVT_MODE_RESUME: - ctrl_reg = readl_relaxed(timer->base_addr + - TTC_CNT_CNTRL_OFFSET); - ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; - writel_relaxed(ctrl_reg, - timer->base_addr + TTC_CNT_CNTRL_OFFSET); - break; - } + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + return 0; +} + +static int ttc_set_periodic(struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + + ttc_set_interval(timer, + DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ)); + return 0; +} + +static int ttc_resume(struct clock_event_device *evt) +{ + struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); + struct ttc_timer *timer = &ttce->ttc; + u32 ctrl_reg; + + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + return 0; } static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, @@ -430,7 +431,10 @@ static void __init ttc_setup_clockevent(struct clk *clk, ttcce->ce.name = "ttc_clockevent"; ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; ttcce->ce.set_next_event = ttc_set_next_event; - ttcce->ce.set_mode = ttc_set_mode; + ttcce->ce.set_state_shutdown = ttc_shutdown; + ttcce->ce.set_state_periodic = ttc_set_periodic; + ttcce->ce.set_state_oneshot = ttc_shutdown; + ttcce->ce.tick_resume = ttc_resume; ttcce->ce.rating = 200; ttcce->ce.irq = irq; ttcce->ce.cpumask = cpu_possible_mask; diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c new file mode 100644 index 000000000..65ec46744 --- /dev/null +++ b/drivers/clocksource/clksrc_st_lpc.c @@ -0,0 +1,131 @@ +/* + * Clocksource using the Low Power Timer found in the Low Power Controller (LPC) + * + * Copyright (C) 2015 STMicroelectronics – All Rights Reserved + * + * Author(s): Francesco Virlinzi + * Ajit Pal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
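/*
 * The shape every conversion in this series follows, sketched once with
 * hypothetical example_* names (the struct fields are the real clockevents
 * API of this kernel): the single set_mode() switch becomes one
 * int-returning callback per state, as in the TTC conversion above, and
 * callbacks a device does not need are simply left unset.
 */
#include <linux/clockchips.h>

static int example_shutdown(struct clock_event_device *evt)
{
	/* stop the counter; replaces MODE_SHUTDOWN and MODE_UNUSED */
	return 0;
}

static int example_set_periodic(struct clock_event_device *evt)
{
	/* program a HZ-rate auto-reload; replaces MODE_PERIODIC */
	return 0;
}

static struct clock_event_device example_ced = {
	.name			= "example",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= example_shutdown,
	.set_state_periodic	= example_set_periodic,
	/* oneshot needs no setup here; set_next_event arms the timer */
	.set_state_oneshot	= example_shutdown,
	.tick_resume		= example_shutdown,
};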
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +/* Low Power Timer */ +#define LPC_LPT_LSB_OFF 0x400 +#define LPC_LPT_MSB_OFF 0x404 +#define LPC_LPT_START_OFF 0x408 + +static struct st_clksrc_ddata { + struct clk *clk; + void __iomem *base; +} ddata; + +static void __init st_clksrc_reset(void) +{ + writel_relaxed(0, ddata.base + LPC_LPT_START_OFF); + writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF); + writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF); + writel_relaxed(1, ddata.base + LPC_LPT_START_OFF); +} + +static u64 notrace st_clksrc_sched_clock_read(void) +{ + return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF); +} + +static int __init st_clksrc_init(void) +{ + unsigned long rate; + int ret; + + st_clksrc_reset(); + + rate = clk_get_rate(ddata.clk); + + sched_clock_register(st_clksrc_sched_clock_read, 32, rate); + + ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF, + "clksrc-st-lpc", rate, 300, 32, + clocksource_mmio_readl_up); + if (ret) { + pr_err("clksrc-st-lpc: Failed to register clocksource\n"); + return ret; + } + + return 0; +} + +static int __init st_clksrc_setup_clk(struct device_node *np) +{ + struct clk *clk; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("clksrc-st-lpc: Failed to get LPC clock\n"); + return PTR_ERR(clk); + } + + if (clk_prepare_enable(clk)) { + pr_err("clksrc-st-lpc: Failed to enable LPC clock\n"); + return -EINVAL; + } + + if (!clk_get_rate(clk)) { + pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n"); + clk_disable_unprepare(clk); + return -EINVAL; + } + + ddata.clk = clk; + + return 0; +} + +static void __init st_clksrc_of_register(struct device_node *np) +{ + int ret; + uint32_t mode; + + ret = of_property_read_u32(np, "st,lpc-mode", &mode); + if (ret) { + pr_err("clksrc-st-lpc: An LPC mode must be provided\n"); + return; + } + + /* LPC can either run as a Clocksource or in RTC or WDT mode */ + if (mode != ST_LPC_MODE_CLKSRC) + return; + + ddata.base = of_iomap(np, 0); + if (!ddata.base) { + pr_err("clksrc-st-lpc: Unable to map iomem\n"); + return; + } + + if (st_clksrc_setup_clk(np)) { + iounmap(ddata.base); + return; + } + + if (st_clksrc_init()) { + clk_disable_unprepare(ddata.clk); + clk_put(ddata.clk); + iounmap(ddata.base); + return; + } + + pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n", + clk_get_rate(ddata.clk)); +} +CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register); diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index d83ec1f2f..cdd86e352 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -61,11 +61,6 @@ static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void clps711x_clockevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ -} - static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, unsigned int irq) { @@ -91,7 +86,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, clkevt->name = "clps711x-clockevent"; clkevt->rating = 300; clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP; - clkevt->set_mode = clps711x_clockevent_set_mode; clkevt->cpumask = cpumask_of(0); clockevents_config_and_register(clkevt, HZ, 0, 0); diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index db2105290..9a7e37cf5 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ 
b/drivers/clocksource/cs5535-clockevt.c @@ -42,7 +42,6 @@ MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); * 256 128 .125 512.000 */ -static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; static struct cs5535_mfgpt_timer *cs5535_event_clock; /* Selected from the table above */ @@ -77,15 +76,17 @@ static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta) MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); } -static void mfgpt_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int mfgpt_shutdown(struct clock_event_device *evt) { disable_timer(cs5535_event_clock); + return 0; +} - if (mode == CLOCK_EVT_MODE_PERIODIC) - start_timer(cs5535_event_clock, MFGPT_PERIODIC); - - cs5535_tick_mode = mode; +static int mfgpt_set_periodic(struct clock_event_device *evt) +{ + disable_timer(cs5535_event_clock); + start_timer(cs5535_event_clock, MFGPT_PERIODIC); + return 0; } static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) @@ -97,7 +98,10 @@ static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt) static struct clock_event_device cs5535_clockevent = { .name = DRV_NAME, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = mfgpt_set_mode, + .set_state_shutdown = mfgpt_shutdown, + .set_state_periodic = mfgpt_set_periodic, + .set_state_oneshot = mfgpt_shutdown, + .tick_resume = mfgpt_shutdown, .set_next_event = mfgpt_next_event, .rating = 250, }; @@ -113,7 +117,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) /* Turn off the clock (and clear the event) */ disable_timer(cs5535_event_clock); - if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN) + if (clockevent_state_shutdown(&cs5535_clockevent)) return IRQ_HANDLED; /* Clear the counter */ @@ -121,7 +125,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) /* Restart the clock in periodic mode */ - if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC) + if (clockevent_state_periodic(&cs5535_clockevent)) cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 31990600f..776b6c86d 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -16,15 +16,6 @@ static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); -static void dummy_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - /* - * Core clockevents code will call this when exchanging timer devices. - * We don't need to do anything here. 
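/*
 * With the cached cs5535_tick_mode variable gone, the interrupt handler
 * above asks the core for the device state instead. A sketch of that idiom
 * with hypothetical example_* names; clockevent_state_shutdown() and
 * clockevent_state_periodic() are the real <linux/clockchips.h> accessors.
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>

static void example_restart_counter(void)
{
	/* reload the counter for the next period */
}

static irqreturn_t example_tick_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	if (clockevent_state_shutdown(evt))
		return IRQ_HANDLED;	/* stray tick: device is stopped */

	/* in periodic mode the counter must be re-armed by hand */
	if (clockevent_state_periodic(evt))
		example_restart_counter();

	evt->event_handler(evt);
	return IRQ_HANDLED;
}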
- */ -} - static void dummy_timer_setup(void) { int cpu = smp_processor_id(); @@ -35,7 +26,6 @@ static void dummy_timer_setup(void) CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DUMMY; evt->rating = 100; - evt->set_mode = dummy_timer_set_mode; evt->cpumask = cpumask_of(cpu); clockevents_register_device(evt); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 35a88097a..c76c75006 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -110,71 +110,87 @@ static void apbt_enable_int(struct dw_apb_timer *timer) apbt_writel(timer, ctrl, APBTMR_N_CONTROL); } -static void apbt_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int apbt_shutdown(struct clock_event_device *evt) { + struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); unsigned long ctrl; - unsigned long period; + + pr_debug("%s CPU %d state=shutdown\n", __func__, + cpumask_first(evt->cpumask)); + + ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); + ctrl &= ~APBTMR_CONTROL_ENABLE; + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + return 0; +} + +static int apbt_set_oneshot(struct clock_event_device *evt) +{ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); + unsigned long ctrl; - pr_debug("%s CPU %d mode=%d\n", __func__, - cpumask_first(evt->cpumask), - mode); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); - ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); - ctrl |= APBTMR_CONTROL_MODE_PERIODIC; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - /* - * DW APB p. 46, have to disable timer before load counter, - * may cause sync problem. - */ - ctrl &= ~APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - udelay(1); - pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); - apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); - ctrl |= APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - break; - - case CLOCK_EVT_MODE_ONESHOT: - ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); - /* - * set free running mode, this mode will let timer reload max - * timeout which will give time (3min on 25MHz clock) to rearm - * the next event, therefore emulate the one-shot mode. - */ - ctrl &= ~APBTMR_CONTROL_ENABLE; - ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; - - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - /* write again to set free running mode */ - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - - /* - * DW APB p. 46, load counter with all 1s before starting free - * running mode. - */ - apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT); - ctrl &= ~APBTMR_CONTROL_INT; - ctrl |= APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); - ctrl &= ~APBTMR_CONTROL_ENABLE; - apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - break; - - case CLOCK_EVT_MODE_RESUME: - apbt_enable_int(&dw_ced->timer); - break; - } + pr_debug("%s CPU %d state=oneshot\n", __func__, + cpumask_first(evt->cpumask)); + + ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); + /* + * set free running mode, this mode will let timer reload max + * timeout which will give time (3min on 25MHz clock) to rearm + * the next event, therefore emulate the one-shot mode. 
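/*
 * Sketch of the calling contract these drivers now rely on (hypothetical
 * example_* names): after clockevents_config_and_register() the core
 * switches states itself; for a oneshot tick it calls ->set_state_oneshot()
 * once and then programs every expiry via ->set_next_event(). Drivers never
 * change state on their own.
 */
#include <linux/clockchips.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* load 'delta' ticks into the comparator and unmask the IRQ */
	return 0;	/* -ETIME if the deadline already passed */
}

static void __init example_register(struct clock_event_device *ced,
				    unsigned long rate)
{
	ced->set_next_event = example_set_next_event;
	/* rate in Hz; events of 0xf..0x7fffffff ticks may be requested */
	clockevents_config_and_register(ced, rate, 0xf, 0x7fffffff);
}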
+ */ + ctrl &= ~APBTMR_CONTROL_ENABLE; + ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; + + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + /* write again to set free running mode */ + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + + /* + * DW APB p. 46, load counter with all 1s before starting free + * running mode. + */ + apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT); + ctrl &= ~APBTMR_CONTROL_INT; + ctrl |= APBTMR_CONTROL_ENABLE; + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + return 0; +} + +static int apbt_set_periodic(struct clock_event_device *evt) +{ + struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); + unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); + unsigned long ctrl; + + pr_debug("%s CPU %d state=periodic\n", __func__, + cpumask_first(evt->cpumask)); + + ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); + ctrl |= APBTMR_CONTROL_MODE_PERIODIC; + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + /* + * DW APB p. 46, have to disable timer before load counter, + * may cause sync problem. + */ + ctrl &= ~APBTMR_CONTROL_ENABLE; + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + udelay(1); + pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); + apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); + ctrl |= APBTMR_CONTROL_ENABLE; + apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + return 0; +} + +static int apbt_resume(struct clock_event_device *evt) +{ + struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); + + pr_debug("%s CPU %d state=resume\n", __func__, + cpumask_first(evt->cpumask)); + + apbt_enable_int(&dw_ced->timer); + return 0; } static int apbt_next_event(unsigned long delta, @@ -232,8 +248,12 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, &dw_ced->ced); dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.cpumask = cpumask_of(cpu); - dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - dw_ced->ced.set_mode = apbt_set_mode; + dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; + dw_ced->ced.set_state_shutdown = apbt_shutdown; + dw_ced->ced.set_state_periodic = apbt_set_periodic; + dw_ced->ced.set_state_oneshot = apbt_set_oneshot; + dw_ced->ced.tick_resume = apbt_resume; dw_ced->ced.set_next_event = apbt_next_event; dw_ced->ced.irq = dw_ced->timer.irq; dw_ced->ced.rating = rating; diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index dc3c6ee04..7a97a34db 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -251,33 +251,21 @@ static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) return container_of(ced, struct em_sti_priv, ced); } -static void em_sti_clock_event_mode(enum clock_event_mode mode, - struct clock_event_device *ced) +static int em_sti_clock_event_shutdown(struct clock_event_device *ced) { struct em_sti_priv *p = ced_to_em_sti(ced); + em_sti_stop(p, USER_CLOCKEVENT); + return 0; +} - /* deal with old setting first */ - switch (ced->mode) { - case CLOCK_EVT_MODE_ONESHOT: - em_sti_stop(p, USER_CLOCKEVENT); - break; - default: - break; - } +static int em_sti_clock_event_set_oneshot(struct clock_event_device *ced) +{ + struct em_sti_priv *p = ced_to_em_sti(ced); - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - dev_info(&p->pdev->dev, "used for oneshot clock events\n"); - em_sti_start(p, USER_CLOCKEVENT); - clockevents_config(&p->ced, p->rate); - break; - case 
CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - em_sti_stop(p, USER_CLOCKEVENT); - break; - default: - break; - } + dev_info(&p->pdev->dev, "used for oneshot clock events\n"); + em_sti_start(p, USER_CLOCKEVENT); + clockevents_config(&p->ced, p->rate); + return 0; } static int em_sti_clock_event_next(unsigned long delta, @@ -303,11 +291,12 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) ced->rating = 200; ced->cpumask = cpu_possible_mask; ced->set_next_event = em_sti_clock_event_next; - ced->set_mode = em_sti_clock_event_mode; + ced->set_state_shutdown = em_sti_clock_event_shutdown; + ced->set_state_oneshot = em_sti_clock_event_set_oneshot; dev_info(&p->pdev->dev, "used for clock events\n"); - /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ + /* Register with dummy 1 Hz value, gets updated in ->set_state_oneshot() */ clockevents_config_and_register(ced, 1, 2, 0xffffffff); } diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 9064ff743..029f96ab1 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -257,15 +257,14 @@ static void exynos4_mct_comp0_stop(void) exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); } -static void exynos4_mct_comp0_start(enum clock_event_mode mode, - unsigned long cycles) +static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) { unsigned int tcon; cycle_t comp_cycle; tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); - if (mode == CLOCK_EVT_MODE_PERIODIC) { + if (periodic) { tcon |= MCT_G_TCON_COMP0_AUTO_INC; exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); } @@ -283,38 +282,38 @@ static void exynos4_mct_comp0_start(enum clock_event_mode mode, static int exynos4_comp_set_next_event(unsigned long cycles, struct clock_event_device *evt) { - exynos4_mct_comp0_start(evt->mode, cycles); + exynos4_mct_comp0_start(false, cycles); return 0; } -static void exynos4_comp_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int mct_set_state_shutdown(struct clock_event_device *evt) { - unsigned long cycles_per_jiffy; exynos4_mct_comp0_stop(); + return 0; +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - cycles_per_jiffy = - (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); - exynos4_mct_comp0_start(mode, cycles_per_jiffy); - break; +static int mct_set_state_periodic(struct clock_event_device *evt) +{ + unsigned long cycles_per_jiffy; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } + cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) + >> evt->shift); + exynos4_mct_comp0_stop(); + exynos4_mct_comp0_start(true, cycles_per_jiffy); + return 0; } static struct clock_event_device mct_comp_device = { - .name = "mct-comp", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 250, - .set_next_event = exynos4_comp_set_next_event, - .set_mode = exynos4_comp_set_mode, + .name = "mct-comp", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 250, + .set_next_event = exynos4_comp_set_next_event, + .set_state_periodic = mct_set_state_periodic, + .set_state_shutdown = mct_set_state_shutdown, + .set_state_oneshot = mct_set_state_shutdown, + .tick_resume = mct_set_state_shutdown, }; static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id) @@ -390,39 +389,32 @@ static int exynos4_tick_set_next_event(unsigned long cycles, return 0; } -static inline void 
exynos4_tick_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int set_state_shutdown(struct clock_event_device *evt) +{ + exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick)); + return 0; +} + +static int set_state_periodic(struct clock_event_device *evt) { struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); unsigned long cycles_per_jiffy; + cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) + >> evt->shift); exynos4_mct_tick_stop(mevt); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - cycles_per_jiffy = - (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); - exynos4_mct_tick_start(cycles_per_jiffy, mevt); - break; - - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } + exynos4_mct_tick_start(cycles_per_jiffy, mevt); + return 0; } static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) { - struct clock_event_device *evt = &mevt->evt; - /* * This is for supporting oneshot mode. * Mct would generate interrupt periodically * without explicit stopping. */ - if (evt->mode != CLOCK_EVT_MODE_PERIODIC) + if (!clockevent_state_periodic(&mevt->evt)) exynos4_mct_tick_stop(mevt); /* Clear the MCT tick interrupt */ @@ -442,20 +434,21 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int exynos4_local_timer_setup(struct clock_event_device *evt) +static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) { - struct mct_clock_event_device *mevt; + struct clock_event_device *evt = &mevt->evt; unsigned int cpu = smp_processor_id(); - mevt = container_of(evt, struct mct_clock_event_device, evt); - mevt->base = EXYNOS4_MCT_L_BASE(cpu); snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); evt->name = mevt->name; evt->cpumask = cpumask_of(cpu); evt->set_next_event = exynos4_tick_set_next_event; - evt->set_mode = exynos4_tick_set_mode; + evt->set_state_periodic = set_state_periodic; + evt->set_state_shutdown = set_state_shutdown; + evt->set_state_oneshot = set_state_shutdown; + evt->tick_resume = set_state_shutdown; evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; evt->rating = 450; @@ -477,9 +470,11 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) return 0; } -static void exynos4_local_timer_stop(struct clock_event_device *evt) +static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) { - evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); + struct clock_event_device *evt = &mevt->evt; + + evt->set_state_shutdown(evt); if (mct_int_type == MCT_INT_SPI) { if (evt->irq != -1) disable_irq_nosync(evt->irq); @@ -500,11 +495,11 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, switch (action & ~CPU_TASKS_FROZEN) { case CPU_STARTING: mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_setup(&mevt->evt); + exynos4_local_timer_setup(mevt); break; case CPU_DYING: mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_stop(&mevt->evt); + exynos4_local_timer_stop(mevt); break; } @@ -570,7 +565,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem goto out_irq; /* Immediately configure the timer on the boot CPU */ - exynos4_local_timer_setup(&mevt->evt); + exynos4_local_timer_setup(mevt); return; out_irq: diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 454227d4f..10202f1fd 100644 --- 
a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c @@ -118,7 +118,7 @@ static inline void ftm_reset_counter(void __iomem *base) ftm_writel(0x00, base + FTM_CNT); } -static u64 ftm_read_sched_clock(void) +static u64 notrace ftm_read_sched_clock(void) { return ftm_readl(priv->clksrc_base + FTM_CNT); } @@ -153,19 +153,16 @@ static int ftm_set_next_event(unsigned long delta, return 0; } -static void ftm_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int ftm_set_oneshot(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - ftm_set_next_event(priv->periodic_cyc, evt); - break; - case CLOCK_EVT_MODE_ONESHOT: - ftm_counter_disable(priv->clkevt_base); - break; - default: - return; - } + ftm_counter_disable(priv->clkevt_base); + return 0; +} + +static int ftm_set_periodic(struct clock_event_device *evt) +{ + ftm_set_next_event(priv->periodic_cyc, evt); + return 0; } static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) @@ -174,7 +171,7 @@ static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) ftm_irq_acknowledge(priv->clkevt_base); - if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) { + if (likely(clockevent_state_oneshot(evt))) { ftm_irq_disable(priv->clkevt_base); ftm_counter_disable(priv->clkevt_base); } @@ -185,11 +182,13 @@ static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) } static struct clock_event_device ftm_clockevent = { - .name = "Freescale ftm timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = ftm_set_mode, - .set_next_event = ftm_set_next_event, - .rating = 300, + .name = "Freescale ftm timer", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_periodic = ftm_set_periodic, + .set_state_oneshot = ftm_set_oneshot, + .set_next_event = ftm_set_next_event, + .rating = 300, }; static struct irqaction ftm_timer_irq = { diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 0214cb3a7..f9b3b7033 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -81,7 +81,7 @@ static irqreturn_t timer8_interrupt(int irq, void *dev_id) p->flags |= FLAG_IRQCONTEXT; ctrl_outw(p->tcora, p->mapbase + TCORA); if (!(p->flags & FLAG_SKIPEVENT)) { - if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(&p->ced)) ctrl_outw(0x0000, p->mapbase + _8TCR); p->ced.event_handler(&p->ced); } @@ -169,29 +169,32 @@ static void timer8_clock_event_start(struct timer8_priv *p, int periodic) timer8_set_next(p, periodic?(p->rate + HZ/2) / HZ:0x10000); } -static void timer8_clock_event_mode(enum clock_event_mode mode, - struct clock_event_device *ced) +static int timer8_clock_event_shutdown(struct clock_event_device *ced) +{ + timer8_stop(ced_to_priv(ced)); + return 0; +} + +static int timer8_clock_event_periodic(struct clock_event_device *ced) { struct timer8_priv *p = ced_to_priv(ced); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - dev_info(&p->pdev->dev, "used for periodic clock events\n"); - timer8_stop(p); - timer8_clock_event_start(p, PERIODIC); - break; - case CLOCK_EVT_MODE_ONESHOT: - dev_info(&p->pdev->dev, "used for oneshot clock events\n"); - timer8_stop(p); - timer8_clock_event_start(p, ONESHOT); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - timer8_stop(p); - break; - default: - break; - } + dev_info(&p->pdev->dev, "used for periodic clock events\n"); + timer8_stop(p); + timer8_clock_event_start(p, PERIODIC); + + return 0; +} 
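/*
 * timer8 above keeps a single start helper and parametrizes it, so both
 * state callbacks reduce to "stop, then start in the right mode". The same
 * shape, sketched with hypothetical example_* names:
 */
#include <linux/clockchips.h>

static void example_stop(void)
{
	/* mask the IRQ and halt the counter */
}

static void example_start(bool periodic)
{
	/* auto-reload when periodic, a single compare match otherwise */
}

static int example_set_periodic(struct clock_event_device *evt)
{
	example_stop();
	example_start(true);
	return 0;
}

static int example_set_oneshot(struct clock_event_device *evt)
{
	example_stop();
	example_start(false);
	return 0;
}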
+ +static int timer8_clock_event_oneshot(struct clock_event_device *ced) +{ + struct timer8_priv *p = ced_to_priv(ced); + + dev_info(&p->pdev->dev, "used for oneshot clock events\n"); + timer8_stop(p); + timer8_clock_event_start(p, ONESHOT); + + return 0; } static int timer8_clock_event_next(unsigned long delta, @@ -199,7 +202,7 @@ static int timer8_clock_event_next(unsigned long delta, { struct timer8_priv *p = ced_to_priv(ced); - BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); + BUG_ON(!clockevent_state_oneshot(ced)); timer8_set_next(p, delta - 1); return 0; @@ -246,7 +249,9 @@ static int timer8_setup(struct timer8_priv *p, p->ced.rating = 200; p->ced.cpumask = cpumask_of(0); p->ced.set_next_event = timer8_clock_event_next; - p->ced.set_mode = timer8_clock_event_mode; + p->ced.set_state_shutdown = timer8_clock_event_shutdown; + p->ced.set_state_periodic = timer8_clock_event_periodic; + p->ced.set_state_oneshot = timer8_clock_event_oneshot; ret = setup_irq(irq, &p->irqaction); if (ret < 0) { diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 14ee3efcc..0efd36e48 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -100,44 +100,40 @@ int __init clocksource_i8253_init(void) #endif #ifdef CONFIG_CLKEVT_I8253 -/* - * Initialize the PIT timer. - * - * This is also called after resume to bring the PIT into operation again. - */ -static void init_pit_timer(enum clock_event_mode mode, - struct clock_event_device *evt) +static int pit_shutdown(struct clock_event_device *evt) { + if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt)) + return 0; + raw_spin_lock(&i8253_lock); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* binary, mode 2, LSB/MSB, ch 0 */ - outb_p(0x34, PIT_MODE); - outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */ - outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */ - break; - - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - if (evt->mode == CLOCK_EVT_MODE_PERIODIC || - evt->mode == CLOCK_EVT_MODE_ONESHOT) { - outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); - } - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* One shot setup */ - outb_p(0x38, PIT_MODE); - break; - - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do here */ - break; - } + outb_p(0x30, PIT_MODE); + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); + + raw_spin_unlock(&i8253_lock); + return 0; +} + +static int pit_set_oneshot(struct clock_event_device *evt) +{ + raw_spin_lock(&i8253_lock); + outb_p(0x38, PIT_MODE); + raw_spin_unlock(&i8253_lock); + return 0; +} + +static int pit_set_periodic(struct clock_event_device *evt) +{ + raw_spin_lock(&i8253_lock); + + /* binary, mode 2, LSB/MSB, ch 0 */ + outb_p(0x34, PIT_MODE); + outb_p(PIT_LATCH & 0xff, PIT_CH0); /* LSB */ + outb_p(PIT_LATCH >> 8, PIT_CH0); /* MSB */ + raw_spin_unlock(&i8253_lock); + return 0; } /* @@ -160,10 +156,11 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) * it can be solely used for the global tick. 
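/*
 * Note the guard at the top of pit_shutdown() above: the core also calls
 * ->set_state_shutdown() on a device that was never started, and the old
 * state is still reported while the callback runs, so the PIT is only
 * reprogrammed if it was actually counting. The guard idiom, sketched with
 * a hypothetical device:
 */
#include <linux/clockchips.h>

static int example_guarded_shutdown(struct clock_event_device *evt)
{
	/* the state still reflects the previous mode at this point */
	if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
		return 0;	/* never ran: leave the hardware alone */

	/* ... stop the counter ... */
	return 0;
}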
*/ struct clock_event_device i8253_clockevent = { - .name = "pit", - .features = CLOCK_EVT_FEAT_PERIODIC, - .set_mode = init_pit_timer, - .set_next_event = pit_next_event, + .name = "pit", + .features = CLOCK_EVT_FEAT_PERIODIC, + .set_state_shutdown = pit_shutdown, + .set_state_periodic = pit_set_periodic, + .set_next_event = pit_next_event, }; /* @@ -172,8 +169,10 @@ struct clock_event_device i8253_clockevent = { */ void __init clockevent_i8253_init(bool oneshot) { - if (oneshot) + if (oneshot) { i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT; + i8253_clockevent.set_state_oneshot = pit_set_oneshot; + } /* * Start pit with the boot cpu mask. x86 might make it global * when it is used as broadcast device later. diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c index 5c15cba41..1fa22c4d2 100644 --- a/drivers/clocksource/meson6_timer.c +++ b/drivers/clocksource/meson6_timer.c @@ -67,25 +67,25 @@ static void meson6_clkevt_time_start(unsigned char timer, bool periodic) writel(val | TIMER_ENABLE_BIT(timer), timer_base + TIMER_ISA_MUX); } -static void meson6_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int meson6_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - meson6_clkevt_time_stop(CED_ID); - meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC/HZ - 1); - meson6_clkevt_time_start(CED_ID, true); - break; - case CLOCK_EVT_MODE_ONESHOT: - meson6_clkevt_time_stop(CED_ID); - meson6_clkevt_time_start(CED_ID, false); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - meson6_clkevt_time_stop(CED_ID); - break; - } + meson6_clkevt_time_stop(CED_ID); + return 0; +} + +static int meson6_set_oneshot(struct clock_event_device *evt) +{ + meson6_clkevt_time_stop(CED_ID); + meson6_clkevt_time_start(CED_ID, false); + return 0; +} + +static int meson6_set_periodic(struct clock_event_device *evt) +{ + meson6_clkevt_time_stop(CED_ID); + meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC / HZ - 1); + meson6_clkevt_time_start(CED_ID, true); + return 0; } static int meson6_clkevt_next_event(unsigned long evt, @@ -99,11 +99,15 @@ static int meson6_clkevt_next_event(unsigned long evt, } static struct clock_event_device meson6_clockevent = { - .name = "meson6_tick", - .rating = 400, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = meson6_clkevt_mode, - .set_next_event = meson6_clkevt_next_event, + .name = "meson6_tick", + .rating = 400, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = meson6_shutdown, + .set_state_periodic = meson6_set_periodic, + .set_state_oneshot = meson6_set_oneshot, + .tick_resume = meson6_shutdown, + .set_next_event = meson6_clkevt_next_event, }; static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id) diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index b7384b853..bcd5c0d60 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -56,25 +56,6 @@ static int metag_timer_set_next_event(unsigned long delta, return 0; } -static void metag_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - break; - - case CLOCK_EVT_MODE_SHUTDOWN: - /* We should disable the IRQ here */ - break; - - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_UNUSED: - WARN_ON(1); - break; - }; -} - static cycle_t 
metag_clocksource_read(struct clocksource *cs) { return __core_reg_get(TXTIMER); @@ -129,7 +110,6 @@ static void arch_timer_setup(unsigned int cpu) clk->rating = 200, clk->shift = 12, clk->irq = tbisig_map(TBID_SIGNUM_TRT), - clk->set_mode = metag_timer_set_mode, clk->set_next_event = metag_timer_set_next_event, clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift); diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index b81ed1a53..02a1945e5 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -33,12 +33,6 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt) return res; } -static void gic_set_clock_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - /* Nothing to do ... */ -} - static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; @@ -67,7 +61,6 @@ static void gic_clockevent_cpu_init(struct clock_event_device *cd) cd->irq = gic_timer_irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = gic_next_event; - cd->set_mode = gic_set_clock_mode; clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff); @@ -79,6 +72,13 @@ static void gic_clockevent_cpu_exit(struct clock_event_device *cd) disable_percpu_irq(gic_timer_irq); } +static void gic_update_frequency(void *data) +{ + unsigned long rate = (unsigned long)data; + + clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate); +} + static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -94,18 +94,40 @@ static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } +static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct clk_notifier_data *cnd = data; + + if (action == POST_RATE_CHANGE) + on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1); + + return NOTIFY_OK; +} + + static struct notifier_block gic_cpu_nb = { .notifier_call = gic_cpu_notifier, }; +static struct notifier_block gic_clk_nb = { + .notifier_call = gic_clk_notifier, +}; + static int gic_clockevent_init(void) { + int ret; + if (!cpu_has_counter || !gic_frequency) return -ENXIO; - setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction); + ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction); + if (ret < 0) + return ret; - register_cpu_notifier(&gic_cpu_nb); + ret = register_cpu_notifier(&gic_cpu_nb); + if (ret < 0) + pr_warn("GIC: Unable to register CPU notifier\n"); gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); @@ -125,18 +147,17 @@ static struct clocksource gic_clocksource = { static void __init __gic_clocksource_init(void) { + int ret; + /* Set clocksource mask. */ gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width()); /* Calculate a somewhat reasonable rating value. 
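/*
 * The clk notifier added to mips-gic above fans out with on_each_cpu()
 * because the GIC timer is a per-cpu clockevent and
 * clockevents_update_freq() has to run on the CPU that owns the device.
 * The same pattern with hypothetical example_* names:
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, example_ced);

static void example_update_freq(void *data)
{
	/* runs on every CPU via on_each_cpu() */
	clockevents_update_freq(this_cpu_ptr(&example_ced),
				(unsigned long)data);
}

static int example_clk_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (action == POST_RATE_CHANGE)
		on_each_cpu(example_update_freq, (void *)cnd->new_rate, 1);

	return NOTIFY_OK;
}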
*/ gic_clocksource.rating = 200 + gic_frequency / 10000000; - clocksource_register_hz(&gic_clocksource, gic_frequency); - - gic_clockevent_init(); - - /* And finally start the counter */ - gic_start_count(); + ret = clocksource_register_hz(&gic_clocksource, gic_frequency); + if (ret < 0) + pr_warn("GIC: Unable to register clocksource\n"); } void __init gic_clocksource_init(unsigned int frequency) @@ -146,11 +167,16 @@ void __init gic_clocksource_init(unsigned int frequency) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE); __gic_clocksource_init(); + gic_clockevent_init(); + + /* And finally start the counter */ + gic_start_count(); } static void __init gic_clocksource_of_init(struct device_node *node) { struct clk *clk; + int ret; if (WARN_ON(!gic_present || !node->parent || !of_device_is_compatible(node->parent, "mti,gic"))) @@ -158,8 +184,13 @@ static void __init gic_clocksource_of_init(struct device_node *node) clk = of_clk_get(node, 0); if (!IS_ERR(clk)) { + if (clk_prepare_enable(clk) < 0) { + pr_err("GIC failed to enable clock\n"); + clk_put(clk); + return; + } + gic_frequency = clk_get_rate(clk); - clk_put(clk); } else if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) { pr_err("GIC frequency not specified.\n"); @@ -172,6 +203,15 @@ static void __init gic_clocksource_of_init(struct device_node *node) } __gic_clocksource_init(); + + ret = gic_clockevent_init(); + if (!ret && !IS_ERR(clk)) { + if (clk_notifier_register(clk, &gic_clk_nb) < 0) + pr_warn("GIC: Unable to register clock notifier\n"); + } + + /* And finally start the counter */ + gic_start_count(); } CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer", gic_clocksource_of_init); diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c index 5eb2c3593..19857af65 100644 --- a/drivers/clocksource/moxart_timer.c +++ b/drivers/clocksource/moxart_timer.c @@ -58,25 +58,24 @@ static void __iomem *base; static unsigned int clock_count_per_tick; -static void moxart_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int moxart_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_ONESHOT: - writel(TIMER1_DISABLE, base + TIMER_CR); - writel(~0, base + TIMER1_BASE + REG_LOAD); - break; - case CLOCK_EVT_MODE_PERIODIC: - writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD); - writel(TIMER1_ENABLE, base + TIMER_CR); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - writel(TIMER1_DISABLE, base + TIMER_CR); - break; - } + writel(TIMER1_DISABLE, base + TIMER_CR); + return 0; +} + +static int moxart_set_oneshot(struct clock_event_device *evt) +{ + writel(TIMER1_DISABLE, base + TIMER_CR); + writel(~0, base + TIMER1_BASE + REG_LOAD); + return 0; +} + +static int moxart_set_periodic(struct clock_event_device *evt) +{ + writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD); + writel(TIMER1_ENABLE, base + TIMER_CR); + return 0; } static int moxart_clkevt_next_event(unsigned long cycles, @@ -95,11 +94,15 @@ static int moxart_clkevt_next_event(unsigned long cycles, } static struct clock_event_device moxart_clockevent = { - .name = "moxart_timer", - .rating = 200, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = moxart_clkevt_mode, - .set_next_event = moxart_clkevt_next_event, + .name = "moxart_timer", + .rating = 200, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = moxart_shutdown, + .set_state_periodic = 
moxart_set_periodic, + .set_state_oneshot = moxart_set_oneshot, + .tick_resume = moxart_set_oneshot, + .set_next_event = moxart_clkevt_next_event, }; static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id) diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index 68ab42356..50f0641c6 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c @@ -102,27 +102,20 @@ static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt, evt->gpt_base + TIMER_CTRL_REG(timer)); } -static void mtk_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int mtk_clkevt_shutdown(struct clock_event_device *clk) +{ + mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT); + return 0; +} + +static int mtk_clkevt_set_periodic(struct clock_event_device *clk) { struct mtk_clock_event_device *evt = to_mtk_clk(clk); mtk_clkevt_time_stop(evt, GPT_CLK_EVT); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT); - mtk_clkevt_time_start(evt, true, GPT_CLK_EVT); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* Timer is enabled in set_next_event */ - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - /* No more interrupts will occur as source is disabled */ - break; - } + mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT); + mtk_clkevt_time_start(evt, true, GPT_CLK_EVT); + return 0; } static int mtk_clkevt_next_event(unsigned long event, @@ -196,7 +189,10 @@ static void __init mtk_timer_init(struct device_node *node) evt->dev.name = "mtk_tick"; evt->dev.rating = 300; evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - evt->dev.set_mode = mtk_clkevt_mode; + evt->dev.set_state_shutdown = mtk_clkevt_shutdown; + evt->dev.set_state_periodic = mtk_clkevt_set_periodic; + evt->dev.set_state_oneshot = mtk_clkevt_shutdown; + evt->dev.tick_resume = mtk_clkevt_shutdown; evt->dev.set_next_event = mtk_clkevt_next_event; evt->dev.cpumask = cpu_possible_mask; diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 445b68a01..f5ce2961c 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -77,7 +77,6 @@ #define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS 0xf static struct clock_event_device mxs_clockevent_device; -static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED; static void __iomem *mxs_timrot_base; static u32 timrot_major_version; @@ -141,64 +140,49 @@ static struct irqaction mxs_timer_irq = { .handler = mxs_timer_interrupt, }; -#ifdef DEBUG -static const char *clock_event_mode_label[] const = { - [CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC", - [CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT", - [CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN", - [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED" -}; -#endif /* DEBUG */ - -static void mxs_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static void mxs_irq_clear(char *state) { /* Disable interrupt in timer module */ timrot_irq_disable(); - if (mode != mxs_clockevent_mode) { - /* Set event time into the furthest future */ - if (timrot_is_v1()) - __raw_writel(0xffff, - mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1)); - else - __raw_writel(0xffffffff, - mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1)); - - /* Clear pending interrupt */ - timrot_irq_acknowledge(); - } + /* Set event time into the furthest future */ + if (timrot_is_v1()) + __raw_writel(0xffff, mxs_timrot_base 
+			     HW_TIMROT_TIMCOUNTn(1));
+	else
+		__raw_writel(0xffffffff,
+			     mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+	/* Clear pending interrupt */
+	timrot_irq_acknowledge();
 
 #ifdef DEBUG
-	pr_info("%s: changing mode from %s to %s\n", __func__,
-		clock_event_mode_label[mxs_clockevent_mode],
-		clock_event_mode_label[mode]);
+	pr_info("%s: changing mode to %s\n", __func__, state);
 #endif /* DEBUG */
+}
 
-	/* Remember timer mode */
-	mxs_clockevent_mode = mode;
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_err("%s: Periodic mode is not implemented\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		timrot_irq_enable();
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Left event sources disabled, no more interrupts appear */
-		break;
-	}
+static int mxs_shutdown(struct clock_event_device *evt)
+{
+	mxs_irq_clear("shutdown");
+
+	return 0;
+}
+
+static int mxs_set_oneshot(struct clock_event_device *evt)
+{
+	if (clockevent_state_oneshot(evt))
+		mxs_irq_clear("oneshot");
+	timrot_irq_enable();
+	return 0;
 }
 
 static struct clock_event_device mxs_clockevent_device = {
-	.name		= "mxs_timrot",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= mxs_set_mode,
-	.set_next_event	= timrotv2_set_next_event,
-	.rating		= 200,
+	.name			= "mxs_timrot",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= mxs_shutdown,
+	.set_state_oneshot	= mxs_set_oneshot,
+	.tick_resume		= mxs_shutdown,
+	.set_next_event		= timrotv2_set_next_event,
+	.rating			= 200,
 };
 
 static int __init mxs_clockevent_init(struct clk *timer_clk)
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index a709cfa49..bc8dd443c 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -119,28 +119,27 @@ static void nmdk_clkevt_reset(void)
 	}
 }
 
-static void nmdk_clkevt_mode(enum clock_event_mode mode,
-			     struct clock_event_device *dev)
+static int nmdk_clkevt_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		clkevt_periodic = true;
-		nmdk_clkevt_reset();
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		clkevt_periodic = false;
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		writel(0, mtu_base + MTU_IMSC);
-		/* disable timer */
-		writel(0, mtu_base + MTU_CR(1));
-		/* load some high default value */
-		writel(0xffffffff, mtu_base + MTU_LR(1));
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	writel(0, mtu_base + MTU_IMSC);
+	/* disable timer */
+	writel(0, mtu_base + MTU_CR(1));
+	/* load some high default value */
+	writel(0xffffffff, mtu_base + MTU_LR(1));
+	return 0;
+}
+
+static int nmdk_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+	clkevt_periodic = false;
+	return 0;
+}
+
+static int nmdk_clkevt_set_periodic(struct clock_event_device *evt)
+{
+	clkevt_periodic = true;
+	nmdk_clkevt_reset();
+	return 0;
 }
 
 static void nmdk_clksrc_reset(void)
@@ -163,13 +162,16 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev)
 }
 
 static struct clock_event_device nmdk_clkevt = {
-	.name		= "mtu_1",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
-			  CLOCK_EVT_FEAT_DYNIRQ,
-	.rating		= 200,
-	.set_mode	= nmdk_clkevt_mode,
-	.set_next_event	= nmdk_clkevt_next,
-	.resume		= nmdk_clkevt_resume,
+	.name			= "mtu_1",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_DYNIRQ,
+	.rating			= 200,
+	.set_state_shutdown	= nmdk_clkevt_shutdown,
+	.set_state_periodic	= nmdk_clkevt_set_periodic,
+
.set_state_oneshot = nmdk_clkevt_set_oneshot, + .set_next_event = nmdk_clkevt_next, + .resume = nmdk_clkevt_resume, }; /* diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index d9438af2b..45b6a4999 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -88,26 +88,12 @@ pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev) return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; } -static void -pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) +static int pxa_osmr0_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); - timer_writel(OSSR_M0, OSSR); - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - /* initializing, released, or preparing for suspend */ - timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); - timer_writel(OSSR_M0, OSSR); - break; - - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_PERIODIC: - break; - } + /* initializing, released, or preparing for suspend */ + timer_writel(timer_readl(OIER) & ~OIER_E0, OIER); + timer_writel(OSSR_M0, OSSR); + return 0; } #ifdef CONFIG_PM @@ -147,13 +133,14 @@ static void pxa_timer_resume(struct clock_event_device *cedev) #endif static struct clock_event_device ckevt_pxa_osmr0 = { - .name = "osmr0", - .features = CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_next_event = pxa_osmr0_set_next_event, - .set_mode = pxa_osmr0_set_mode, - .suspend = pxa_timer_suspend, - .resume = pxa_timer_resume, + .name = "osmr0", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = pxa_osmr0_set_next_event, + .set_state_shutdown = pxa_osmr0_shutdown, + .set_state_oneshot = pxa_osmr0_shutdown, + .suspend = pxa_timer_suspend, + .resume = pxa_timer_resume, }; static struct irqaction pxa_ost0_irq = { diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index cba2d0155..f8e09f923 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -47,7 +47,7 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; /* Stop the timer tick */ - if (evt->mode == CLOCK_EVT_MODE_ONESHOT) { + if (clockevent_state_oneshot(evt)) { u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); ctrl &= ~TIMER_ENABLE_EN; writel_relaxed(ctrl, event_base + TIMER_ENABLE); @@ -75,26 +75,14 @@ static int msm_timer_set_next_event(unsigned long cycles, return 0; } -static void msm_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int msm_timer_shutdown(struct clock_event_device *evt) { u32 ctrl; ctrl = readl_relaxed(event_base + TIMER_ENABLE); ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN); - - switch (mode) { - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_PERIODIC: - break; - case CLOCK_EVT_MODE_ONESHOT: - /* Timer is enabled in set_next_event */ - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - break; - } writel_relaxed(ctrl, event_base + TIMER_ENABLE); + return 0; } static struct clock_event_device __percpu *msm_evt; @@ -126,7 +114,9 @@ static int msm_local_timer_setup(struct clock_event_device *evt) evt->name = "msm_timer"; evt->features = CLOCK_EVT_FEAT_ONESHOT; evt->rating = 200; - evt->set_mode = msm_timer_set_mode; + evt->set_state_shutdown = msm_timer_shutdown; + evt->set_state_oneshot = msm_timer_shutdown; + evt->tick_resume = msm_timer_shutdown; evt->set_next_event 
= msm_timer_set_next_event; evt->cpumask = cpumask_of(cpu); @@ -147,7 +137,7 @@ static int msm_local_timer_setup(struct clock_event_device *evt) static void msm_local_timer_stop(struct clock_event_device *evt) { - evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); } diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index a35993baf..d3c1742de 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -82,23 +82,18 @@ static inline int rk_timer_set_next_event(unsigned long cycles, return 0; } -static inline void rk_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *ce) +static int rk_timer_shutdown(struct clock_event_device *ce) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - rk_timer_disable(ce); - rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce); - rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING); - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - rk_timer_disable(ce); - break; - } + rk_timer_disable(ce); + return 0; +} + +static int rk_timer_set_periodic(struct clock_event_device *ce) +{ + rk_timer_disable(ce); + rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce); + rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING); + return 0; } static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) @@ -107,7 +102,7 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) rk_timer_interrupt_clear(ce); - if (ce->mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(ce)) rk_timer_disable(ce); ce->event_handler(ce); @@ -153,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } @@ -161,7 +156,8 @@ static void __init rk_timer_init(struct device_node *np) ce->name = TIMER_NAME; ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; ce->set_next_event = rk_timer_set_next_event; - ce->set_mode = rk_timer_set_mode; + ce->set_state_shutdown = rk_timer_shutdown; + ce->set_state_periodic = rk_timer_set_periodic; ce->irq = irq; ce->cpumask = cpumask_of(0); ce->rating = 250; diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 5645cfc90..9502bc4c3 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -207,25 +207,18 @@ static int samsung_set_next_event(unsigned long cycles, return 0; } -static void samsung_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int samsung_shutdown(struct clock_event_device *evt) { samsung_time_stop(pwm.event_id); + return 0; +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1); - samsung_time_start(pwm.event_id, true); - break; - - case CLOCK_EVT_MODE_ONESHOT: - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } +static int samsung_set_periodic(struct clock_event_device *evt) +{ + samsung_time_stop(pwm.event_id); + samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1); + samsung_time_start(pwm.event_id, true); + return 0; } static void samsung_clockevent_resume(struct clock_event_device *cev) @@ -240,12 +233,16 @@ static void samsung_clockevent_resume(struct 
clock_event_device *cev) } static struct clock_event_device time_event_device = { - .name = "samsung_event_timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_next_event = samsung_set_next_event, - .set_mode = samsung_set_mode, - .resume = samsung_clockevent_resume, + .name = "samsung_event_timer", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = samsung_set_next_event, + .set_state_shutdown = samsung_shutdown, + .set_state_periodic = samsung_set_periodic, + .set_state_oneshot = samsung_shutdown, + .tick_resume = samsung_shutdown, + .resume = samsung_clockevent_resume, }; static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) @@ -310,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs) samsung_time_start(pwm.source_id, true); } -static cycle_t samsung_clocksource_read(struct clocksource *c) +static cycle_t notrace samsung_clocksource_read(struct clocksource *c) { return ~readl_relaxed(pwm.source_reg); } diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index c96de1403..ba73a6eb8 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -538,7 +538,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) if (ch->flags & FLAG_CLOCKEVENT) { if (!(ch->flags & FLAG_SKIPEVENT)) { - if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) { + if (clockevent_state_oneshot(&ch->ced)) { ch->next_match_value = ch->max_match_value; ch->flags |= FLAG_REPROGRAM; } @@ -554,7 +554,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) sh_cmt_clock_event_program_verify(ch, 1); if (ch->flags & FLAG_CLOCKEVENT) - if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) + if ((clockevent_state_shutdown(&ch->ced)) || (ch->match_value == ch->next_match_value)) ch->flags &= ~FLAG_REPROGRAM; } @@ -726,39 +726,37 @@ static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic) sh_cmt_set_next(ch, ch->max_match_value); } -static void sh_cmt_clock_event_mode(enum clock_event_mode mode, - struct clock_event_device *ced) +static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced) +{ + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + + sh_cmt_stop(ch, FLAG_CLOCKEVENT); + return 0; +} + +static int sh_cmt_clock_event_set_state(struct clock_event_device *ced, + int periodic) { struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); /* deal with old setting first */ - switch (ced->mode) { - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_ONESHOT: + if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced)) sh_cmt_stop(ch, FLAG_CLOCKEVENT); - break; - default: - break; - } - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - dev_info(&ch->cmt->pdev->dev, - "ch%u: used for periodic clock events\n", ch->index); - sh_cmt_clock_event_start(ch, 1); - break; - case CLOCK_EVT_MODE_ONESHOT: - dev_info(&ch->cmt->pdev->dev, - "ch%u: used for oneshot clock events\n", ch->index); - sh_cmt_clock_event_start(ch, 0); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - sh_cmt_stop(ch, FLAG_CLOCKEVENT); - break; - default: - break; - } + dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n", + ch->index, periodic ? 
"periodic" : "oneshot"); + sh_cmt_clock_event_start(ch, periodic); + return 0; +} + +static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced) +{ + return sh_cmt_clock_event_set_state(ced, 0); +} + +static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced) +{ + return sh_cmt_clock_event_set_state(ced, 1); } static int sh_cmt_clock_event_next(unsigned long delta, @@ -766,7 +764,7 @@ static int sh_cmt_clock_event_next(unsigned long delta, { struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); - BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); + BUG_ON(!clockevent_state_oneshot(ced)); if (likely(ch->flags & FLAG_IRQCONTEXT)) ch->next_match_value = delta - 1; else @@ -820,7 +818,9 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, ced->rating = 125; ced->cpumask = cpu_possible_mask; ced->set_next_event = sh_cmt_clock_event_next; - ced->set_mode = sh_cmt_clock_event_mode; + ced->set_state_shutdown = sh_cmt_clock_event_shutdown; + ced->set_state_periodic = sh_cmt_clock_event_set_periodic; + ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot; ced->suspend = sh_cmt_clock_event_suspend; ced->resume = sh_cmt_clock_event_resume; @@ -935,9 +935,6 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt) static const struct platform_device_id sh_cmt_id_table[] = { { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, - { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, - { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, - { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, { } }; MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 3d88698cf..53aa7e92a 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -276,36 +276,27 @@ static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced) return container_of(ced, struct sh_mtu2_channel, ced); } -static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, - struct clock_event_device *ced) +static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced) { struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); - int disabled = 0; - /* deal with old setting first */ - switch (ced->mode) { - case CLOCK_EVT_MODE_PERIODIC: + if (clockevent_state_periodic(ced)) sh_mtu2_disable(ch); - disabled = 1; - break; - default: - break; - } - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - dev_info(&ch->mtu->pdev->dev, - "ch%u: used for periodic clock events\n", ch->index); - sh_mtu2_enable(ch); - break; - case CLOCK_EVT_MODE_UNUSED: - if (!disabled) - sh_mtu2_disable(ch); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - default: - break; - } + return 0; +} + +static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced) +{ + struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); + + if (clockevent_state_periodic(ced)) + sh_mtu2_disable(ch); + + dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n", + ch->index); + sh_mtu2_enable(ch); + return 0; } static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) @@ -327,7 +318,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->rating = 200; ced->cpumask = cpu_possible_mask; - ced->set_mode = sh_mtu2_clock_event_mode; + ced->set_state_shutdown = sh_mtu2_clock_event_shutdown; + ced->set_state_periodic = sh_mtu2_clock_event_set_periodic; 
ced->suspend = sh_mtu2_clock_event_suspend; ced->resume = sh_mtu2_clock_event_resume; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index b6b8fa3cd..469e776ec 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -240,7 +240,7 @@ static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id) struct sh_tmu_channel *ch = dev_id; /* disable or acknowledge interrupt */ - if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(&ch->ced)) sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); else sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); @@ -358,42 +358,38 @@ static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic) } } -static void sh_tmu_clock_event_mode(enum clock_event_mode mode, - struct clock_event_device *ced) +static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced) +{ + struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); + + if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced)) + sh_tmu_disable(ch); + return 0; +} + +static int sh_tmu_clock_event_set_state(struct clock_event_device *ced, + int periodic) { struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); - int disabled = 0; /* deal with old setting first */ - switch (ced->mode) { - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_ONESHOT: + if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced)) sh_tmu_disable(ch); - disabled = 1; - break; - default: - break; - } - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - dev_info(&ch->tmu->pdev->dev, - "ch%u: used for periodic clock events\n", ch->index); - sh_tmu_clock_event_start(ch, 1); - break; - case CLOCK_EVT_MODE_ONESHOT: - dev_info(&ch->tmu->pdev->dev, - "ch%u: used for oneshot clock events\n", ch->index); - sh_tmu_clock_event_start(ch, 0); - break; - case CLOCK_EVT_MODE_UNUSED: - if (!disabled) - sh_tmu_disable(ch); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - default: - break; - } + dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n", + ch->index, periodic ? 
"periodic" : "oneshot"); + sh_tmu_clock_event_start(ch, periodic); + return 0; +} + +static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced) +{ + return sh_tmu_clock_event_set_state(ced, 0); +} + +static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced) +{ + return sh_tmu_clock_event_set_state(ced, 1); } static int sh_tmu_clock_event_next(unsigned long delta, @@ -401,7 +397,7 @@ static int sh_tmu_clock_event_next(unsigned long delta, { struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); - BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); + BUG_ON(!clockevent_state_oneshot(ced)); /* program new delta value */ sh_tmu_set_next(ch, delta, 0); @@ -430,7 +426,9 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, ced->rating = 200; ced->cpumask = cpu_possible_mask; ced->set_next_event = sh_tmu_clock_event_next; - ced->set_mode = sh_tmu_clock_event_mode; + ced->set_state_shutdown = sh_tmu_clock_event_shutdown; + ced->set_state_periodic = sh_tmu_clock_event_set_periodic; + ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot; ced->suspend = sh_tmu_clock_event_suspend; ced->resume = sh_tmu_clock_event_resume; diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 1928a8912..6f3719d73 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -81,25 +81,25 @@ static void sun4i_clkevt_time_start(u8 timer, bool periodic) timer_base + TIMER_CTL_REG(timer)); } -static void sun4i_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int sun4i_clkevt_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - sun4i_clkevt_time_stop(0); - sun4i_clkevt_time_setup(0, ticks_per_jiffy); - sun4i_clkevt_time_start(0, true); - break; - case CLOCK_EVT_MODE_ONESHOT: - sun4i_clkevt_time_stop(0); - sun4i_clkevt_time_start(0, false); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - sun4i_clkevt_time_stop(0); - break; - } + sun4i_clkevt_time_stop(0); + return 0; +} + +static int sun4i_clkevt_set_oneshot(struct clock_event_device *evt) +{ + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_start(0, false); + return 0; +} + +static int sun4i_clkevt_set_periodic(struct clock_event_device *evt) +{ + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_setup(0, ticks_per_jiffy); + sun4i_clkevt_time_start(0, true); + return 0; } static int sun4i_clkevt_next_event(unsigned long evt, @@ -116,7 +116,10 @@ static struct clock_event_device sun4i_clockevent = { .name = "sun4i_tick", .rating = 350, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = sun4i_clkevt_mode, + .set_state_shutdown = sun4i_clkevt_shutdown, + .set_state_periodic = sun4i_clkevt_set_periodic, + .set_state_oneshot = sun4i_clkevt_set_oneshot, + .tick_resume = sun4i_clkevt_shutdown, .set_next_event = sun4i_clkevt_next_event, }; diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 8bdbc45c6..d28d2fe79 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -91,55 +91,62 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) */ static u32 timer_clock; -static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) +static int tc_shutdown(struct clock_event_device *d) { struct tc_clkevt_device *tcd = to_tc_clkevt(d); void __iomem *regs = tcd->regs; - if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC - || tcd->clkevt.mode == 
CLOCK_EVT_MODE_ONESHOT) { - __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); - __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); - clk_disable(tcd->clk); - } + __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); + __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); + clk_disable(tcd->clk); - switch (m) { + return 0; +} - /* By not making the gentime core emulate periodic mode on top - * of oneshot, we get lower overhead and improved accuracy. - */ - case CLOCK_EVT_MODE_PERIODIC: - clk_enable(tcd->clk); +static int tc_set_oneshot(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + void __iomem *regs = tcd->regs; - /* slow clock, count up to RC, then irq and restart */ - __raw_writel(timer_clock - | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); - __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_shutdown(d); - /* Enable clock and interrupts on RC compare */ - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); + clk_enable(tcd->clk); - /* go go gadget! */ - __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, - regs + ATMEL_TC_REG(2, CCR)); - break; + /* slow clock, count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | + ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); - case CLOCK_EVT_MODE_ONESHOT: - clk_enable(tcd->clk); + /* set_next_event() configures and starts the timer */ + return 0; +} - /* slow clock, count up to RC, then irq and stop */ - __raw_writel(timer_clock | ATMEL_TC_CPCSTOP - | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +static int tc_set_periodic(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + void __iomem *regs = tcd->regs; - /* set_next_event() configures and starts the timer */ - break; + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_shutdown(d); - default: - break; - } + /* By not making the gentime core emulate periodic mode on top + * of oneshot, we get lower overhead and improved accuracy. + */ + clk_enable(tcd->clk); + + /* slow clock, count up to RC, then irq and restart */ + __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); + __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); + + /* go go gadget! 
*/ + __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs + + ATMEL_TC_REG(2, CCR)); + return 0; } static int tc_next_event(unsigned long delta, struct clock_event_device *d) @@ -154,13 +161,15 @@ static int tc_next_event(unsigned long delta, struct clock_event_device *d) static struct tc_clkevt_device clkevt = { .clkevt = { - .name = "tc_clkevt", - .features = CLOCK_EVT_FEAT_PERIODIC - | CLOCK_EVT_FEAT_ONESHOT, + .name = "tc_clkevt", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, /* Should be lower than at91rm9200's system timer */ - .rating = 125, - .set_next_event = tc_next_event, - .set_mode = tc_mode, + .rating = 125, + .set_next_event = tc_next_event, + .set_state_shutdown = tc_shutdown, + .set_state_periodic = tc_set_periodic, + .set_state_oneshot = tc_set_oneshot, }, }; diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index 5a112d72f..6ebda1177 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -72,33 +72,36 @@ static int tegra_timer_set_next_event(unsigned long cycles, return 0; } -static void tegra_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static inline void timer_shutdown(struct clock_event_device *evt) { - u32 reg; - timer_writel(0, TIMER3_BASE + TIMER_PTV); +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - reg = 0xC0000000 | ((1000000/HZ)-1); - timer_writel(reg, TIMER3_BASE + TIMER_PTV); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } +static int tegra_timer_shutdown(struct clock_event_device *evt) +{ + timer_shutdown(evt); + return 0; +} + +static int tegra_timer_set_periodic(struct clock_event_device *evt) +{ + u32 reg = 0xC0000000 | ((1000000 / HZ) - 1); + + timer_shutdown(evt); + timer_writel(reg, TIMER3_BASE + TIMER_PTV); + return 0; } static struct clock_event_device tegra_clockevent = { - .name = "timer0", - .rating = 300, - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .set_next_event = tegra_timer_set_next_event, - .set_mode = tegra_timer_set_mode, + .name = "timer0", + .rating = 300, + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .set_next_event = tegra_timer_set_next_event, + .set_state_shutdown = tegra_timer_shutdown, + .set_state_periodic = tegra_timer_set_periodic, + .set_state_oneshot = tegra_timer_shutdown, + .tick_resume = tegra_timer_shutdown, }; static u64 notrace tegra_read_sched_clock(void) diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 0c8c5e337..2162796fd 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -121,33 +121,33 @@ armada_370_xp_clkevt_next_event(unsigned long delta, return 0; } -static void -armada_370_xp_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt) { - if (mode == CLOCK_EVT_MODE_PERIODIC) { + /* + * Disable timer. + */ + local_timer_ctrl_clrset(TIMER0_EN, 0); - /* - * Setup timer to fire at 1/HZ intervals. - */ - writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF); - writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF); + /* + * ACK pending timer interrupt. + */ + writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); + return 0; +} - /* - * Enable timer. 
- */ - local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); - } else { - /* - * Disable timer. - */ - local_timer_ctrl_clrset(TIMER0_EN, 0); +static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt) +{ + /* + * Setup timer to fire at 1/HZ intervals. + */ + writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF); + writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF); - /* - * ACK pending timer interrupt. - */ - writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); - } + /* + * Enable timer. + */ + local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); + return 0; } static int armada_370_xp_clkevt_irq; @@ -185,7 +185,10 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) evt->shift = 32, evt->rating = 300, evt->set_next_event = armada_370_xp_clkevt_next_event, - evt->set_mode = armada_370_xp_clkevt_mode, + evt->set_state_shutdown = armada_370_xp_clkevt_shutdown; + evt->set_state_periodic = armada_370_xp_clkevt_set_periodic; + evt->set_state_oneshot = armada_370_xp_clkevt_shutdown; + evt->tick_resume = armada_370_xp_clkevt_shutdown; evt->irq = armada_370_xp_clkevt_irq; evt->cpumask = cpumask_of(cpu); @@ -197,7 +200,7 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) static void armada_370_xp_timer_stop(struct clock_event_device *evt) { - evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); } diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c index 5b6e3d564..b06e4c2be 100644 --- a/drivers/clocksource/time-efm32.c +++ b/drivers/clocksource/time-efm32.c @@ -48,40 +48,42 @@ struct efm32_clock_event_ddata { unsigned periodic_top; }; -static void efm32_clock_event_set_mode(enum clock_event_mode mode, - struct clock_event_device *evtdev) +static int efm32_clock_event_shutdown(struct clock_event_device *evtdev) { struct efm32_clock_event_ddata *ddata = container_of(evtdev, struct efm32_clock_event_ddata, evtdev); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP); - writel_relaxed(TIMERn_CTRL_PRESC_1024 | - TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | - TIMERn_CTRL_MODE_DOWN, - ddata->base + TIMERn_CTRL); - writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); - break; - - case CLOCK_EVT_MODE_ONESHOT: - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - writel_relaxed(TIMERn_CTRL_PRESC_1024 | - TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | - TIMERn_CTRL_OSMEN | - TIMERn_CTRL_MODE_DOWN, - ddata->base + TIMERn_CTRL); - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); - break; - - case CLOCK_EVT_MODE_RESUME: - break; - } + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + return 0; +} + +static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); + writel_relaxed(TIMERn_CTRL_PRESC_1024 | + TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | + TIMERn_CTRL_OSMEN | + TIMERn_CTRL_MODE_DOWN, + ddata->base + TIMERn_CTRL); + return 0; +} + +static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev) +{ + struct efm32_clock_event_ddata *ddata = + container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + + writel_relaxed(TIMERn_CMD_STOP, 
ddata->base + TIMERn_CMD); + writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP); + writel_relaxed(TIMERn_CTRL_PRESC_1024 | + TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | + TIMERn_CTRL_MODE_DOWN, + ddata->base + TIMERn_CTRL); + writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); + return 0; } static int efm32_clock_event_set_next_event(unsigned long evt, @@ -112,7 +114,9 @@ static struct efm32_clock_event_ddata clock_event_ddata = { .evtdev = { .name = "efm32 clockevent", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .set_mode = efm32_clock_event_set_mode, + .set_state_shutdown = efm32_clock_event_shutdown, + .set_state_periodic = efm32_clock_event_set_periodic, + .set_state_oneshot = efm32_clock_event_set_oneshot, .set_next_event = efm32_clock_event_set_next_event, .rating = 200, }, diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c index 0b3ce0399..0ece7427b 100644 --- a/drivers/clocksource/time-orion.c +++ b/drivers/clocksource/time-orion.c @@ -60,30 +60,36 @@ static int orion_clkevt_next_event(unsigned long delta, return 0; } -static void orion_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int orion_clkevt_shutdown(struct clock_event_device *dev) { - if (mode == CLOCK_EVT_MODE_PERIODIC) { - /* setup and enable periodic timer at 1/HZ intervals */ - writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); - writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); - atomic_io_modify(timer_base + TIMER_CTRL, - TIMER1_RELOAD_EN | TIMER1_EN, - TIMER1_RELOAD_EN | TIMER1_EN); - } else { - /* disable timer */ - atomic_io_modify(timer_base + TIMER_CTRL, - TIMER1_RELOAD_EN | TIMER1_EN, 0); - } + /* disable timer */ + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER1_RELOAD_EN | TIMER1_EN, 0); + return 0; +} + +static int orion_clkevt_set_periodic(struct clock_event_device *dev) +{ + /* setup and enable periodic timer at 1/HZ intervals */ + writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); + writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); + atomic_io_modify(timer_base + TIMER_CTRL, + TIMER1_RELOAD_EN | TIMER1_EN, + TIMER1_RELOAD_EN | TIMER1_EN); + return 0; } static struct clock_event_device orion_clkevt = { - .name = "orion_event", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .shift = 32, - .rating = 300, - .set_next_event = orion_clkevt_next_event, - .set_mode = orion_clkevt_mode, + .name = "orion_event", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .shift = 32, + .rating = 300, + .set_next_event = orion_clkevt_next_event, + .set_state_shutdown = orion_clkevt_shutdown, + .set_state_periodic = orion_clkevt_set_periodic, + .set_state_oneshot = orion_clkevt_shutdown, + .tick_resume = orion_clkevt_shutdown, }; static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c new file mode 100644 index 000000000..bba679900 --- /dev/null +++ b/drivers/clocksource/time-pistachio.c @@ -0,0 +1,218 @@ +/* + * Pistachio clocksource based on general-purpose timers + * + * Copyright (C) 2015 Imagination Technologies + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/clk.h> +#include <linux/clocksource.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/sched_clock.h> +#include <linux/time.h> + +/* Top level reg */ +#define CR_TIMER_CTRL_CFG 0x00 +#define TIMER_ME_GLOBAL BIT(0) +#define CR_TIMER_REV 0x10 + +/* Timer specific registers */ +#define TIMER_CFG 0x20 +#define TIMER_ME_LOCAL BIT(0) +#define TIMER_RELOAD_VALUE 0x24 +#define TIMER_CURRENT_VALUE 0x28 +#define TIMER_CURRENT_OVERFLOW_VALUE 0x2C +#define TIMER_IRQ_STATUS 0x30 +#define TIMER_IRQ_CLEAR 0x34 +#define TIMER_IRQ_MASK 0x38 + +#define PERIP_TIMER_CONTROL 0x90 + +/* Timer specific configuration Values */ +#define RELOAD_VALUE 0xffffffff + +struct pistachio_clocksource { + void __iomem *base; + raw_spinlock_t lock; + struct clocksource cs; +}; + +static struct pistachio_clocksource pcs_gpt; + +#define to_pistachio_clocksource(cs) \ + container_of(cs, struct pistachio_clocksource, cs) + +static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id) +{ + return readl(base + 0x20 * gpt_id + offset); +} + +static inline void gpt_writel(void __iomem *base, u32 value, u32 offset, + u32 gpt_id) +{ + writel(value, base + 0x20 * gpt_id + offset); +} + +static cycle_t notrace +pistachio_clocksource_read_cycles(struct clocksource *cs) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + u32 counter, overflw; + unsigned long flags; + + /* + * The counter value is only refreshed after the overflow value is read. + * And they must be read in strict order, hence raw spin lock added. + */ + + raw_spin_lock_irqsave(&pcs->lock, flags); + overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0); + counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); + raw_spin_unlock_irqrestore(&pcs->lock, flags); + + return ~(cycle_t)counter; +} + +static u64 notrace pistachio_read_sched_clock(void) +{ + return pistachio_clocksource_read_cycles(&pcs_gpt.cs); +} + +static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx, + int enable) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + u32 val; + + val = gpt_readl(pcs->base, TIMER_CFG, timeridx); + if (enable) + val |= TIMER_ME_LOCAL; + else + val &= ~TIMER_ME_LOCAL; + + gpt_writel(pcs->base, val, TIMER_CFG, timeridx); +} + +static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx) +{ + struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); + + /* Disable GPT local before loading reload value */ + pistachio_clksrc_set_mode(cs, timeridx, false); + gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx); + pistachio_clksrc_set_mode(cs, timeridx, true); +} + +static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx) +{ + /* Disable GPT local */ + pistachio_clksrc_set_mode(cs, timeridx, false); +} + +static int pistachio_clocksource_enable(struct clocksource *cs) +{ + pistachio_clksrc_enable(cs, 0); + return 0; +} + +static void pistachio_clocksource_disable(struct clocksource *cs) +{ + pistachio_clksrc_disable(cs, 0); +} + +/* Desirable clock source for pistachio platform */ +static struct pistachio_clocksource pcs_gpt = { + .cs = { + .name = "gptimer", + .rating = 300, + .enable = pistachio_clocksource_enable, + .disable = pistachio_clocksource_disable, + .read = pistachio_clocksource_read_cycles, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_SUSPEND_NONSTOP, + }, +}; + +static void __init pistachio_clksrc_of_init(struct device_node *node) +{ +
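+ /* + * Probe order below: map the GPT registers, switch the counter input to + * the fast clock through the img,cr-periph regmap, enable the sys and + * fast clocks, mask the four per-timer irqs (the block is used only as a + * clocksource), then enable the timer block and register the clocksource + * and sched_clock. + */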
struct clk *sys_clk, *fast_clk; + struct regmap *periph_regs; + unsigned long rate; + int ret; + + pcs_gpt.base = of_iomap(node, 0); + if (!pcs_gpt.base) { + pr_err("cannot iomap\n"); + return; + } + + periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); + if (IS_ERR(periph_regs)) { + pr_err("cannot get peripheral regmap (%ld)\n", + PTR_ERR(periph_regs)); + return; + } + + /* Switch to using the fast counter clock */ + ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, + 0xf, 0x0); + if (ret) + return; + + sys_clk = of_clk_get_by_name(node, "sys"); + if (IS_ERR(sys_clk)) { + pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk)); + return; + } + + fast_clk = of_clk_get_by_name(node, "fast"); + if (IS_ERR(fast_clk)) { + pr_err("clock get failed (%ld)\n", PTR_ERR(fast_clk)); + return; + } + + ret = clk_prepare_enable(sys_clk); + if (ret < 0) { + pr_err("failed to enable clock (%d)\n", ret); + return; + } + + ret = clk_prepare_enable(fast_clk); + if (ret < 0) { + pr_err("failed to enable clock (%d)\n", ret); + clk_disable_unprepare(sys_clk); + return; + } + + rate = clk_get_rate(fast_clk); + + /* Disable irq's for clocksource usage */ + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); + gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); + + /* Enable timer block */ + writel(TIMER_ME_GLOBAL, pcs_gpt.base); + + raw_spin_lock_init(&pcs_gpt.lock); + sched_clock_register(pistachio_read_sched_clock, 32, rate); + clocksource_register_hz(&pcs_gpt.cs, rate); +} +CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer", + pistachio_clksrc_of_init); diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 60f9de343..27fa13680 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c @@ -76,7 +76,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) /* clear timer interrupt */ writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS); - if (ce->mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(ce)) sirfsoc_timer_count_disable(cpu); ce->event_handler(ce); @@ -117,18 +117,11 @@ static int sirfsoc_timer_set_next_event(unsigned long delta, return 0; } -static void sirfsoc_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *ce) +/* Oneshot is enabled in set_next_event */ +static int sirfsoc_timer_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - /* enable in set_next_event */ - break; - default: - break; - } - sirfsoc_timer_count_disable(smp_processor_id()); + return 0; } static void sirfsoc_clocksource_suspend(struct clocksource *cs) @@ -193,7 +186,9 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) ce->name = "local_timer"; ce->features = CLOCK_EVT_FEAT_ONESHOT; ce->rating = 200; - ce->set_mode = sirfsoc_timer_set_mode; + ce->set_state_shutdown = sirfsoc_timer_shutdown; + ce->set_state_oneshot = sirfsoc_timer_shutdown; + ce->tick_resume = sirfsoc_timer_shutdown; ce->set_next_event = sirfsoc_timer_set_next_event; clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60); ce->max_delta_ns = clockevent_delta2ns(-2, ce); diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index c0304ff60..d911c5dca 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c @@ -90,33 +90,27 @@ static cycle_t read_pit_clk(struct
clocksource *cs) return elapsed; } +static int pit_clkevt_shutdown(struct clock_event_device *dev) +{ + struct pit_data *data = clkevt_to_pit_data(dev); + + /* disable irq, leaving the clocksource active */ + pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); + return 0; +} + /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ -static void -pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) +static int pit_clkevt_set_periodic(struct clock_event_device *dev) { struct pit_data *data = clkevt_to_pit_data(dev); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* update clocksource counter */ - data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); - pit_write(data->base, AT91_PIT_MR, - (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN); - break; - case CLOCK_EVT_MODE_ONESHOT: - BUG(); - /* FALLTHROUGH */ - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - /* disable irq, leaving the clocksource active */ - pit_write(data->base, AT91_PIT_MR, - (data->cycle - 1) | AT91_PIT_PITEN); - break; - case CLOCK_EVT_MODE_RESUME: - break; - } + /* update clocksource counter */ + data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); + pit_write(data->base, AT91_PIT_MR, + (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN); + return 0; } static void at91sam926x_pit_suspend(struct clock_event_device *cedev) @@ -162,7 +156,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) WARN_ON_ONCE(!irqs_disabled()); /* The PIT interrupt may be disabled, and is shared */ - if ((data->clkevt.mode == CLOCK_EVT_MODE_PERIODIC) && + if (clockevent_state_periodic(&data->clkevt) && (pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) { unsigned nr_ticks; @@ -208,8 +202,8 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data) data->clksrc.mask = CLOCKSOURCE_MASK(bits); data->clksrc.name = "pit"; data->clksrc.rating = 175; - data->clksrc.read = read_pit_clk, - data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS, + data->clksrc.read = read_pit_clk; + data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; clocksource_register_hz(&data->clksrc, pit_rate); /* Set up irq handler */ @@ -227,7 +221,8 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data) data->clkevt.rating = 100; data->clkevt.cpumask = cpumask_of(0); - data->clkevt.set_mode = pit_clkevt_mode; + data->clkevt.set_state_shutdown = pit_clkevt_shutdown; + data->clkevt.set_state_periodic = pit_clkevt_set_periodic; data->clkevt.resume = at91sam926x_pit_resume; data->clkevt.suspend = at91sam926x_pit_suspend; clockevents_register_device(&data->clkevt); diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c index 1692e17e0..41b7b6dc1 100644 --- a/drivers/clocksource/timer-atmel-st.c +++ b/drivers/clocksource/timer-atmel-st.c @@ -106,36 +106,47 @@ static struct clocksource clk32k = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void -clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev) +static void clkdev32k_disable_and_flush_irq(void) { unsigned int val; /* Disable and flush pending timer interrupts */ regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS); regmap_read(regmap_st, AT91_ST_SR, &val); - last_crtr = read_CRTR(); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* PIT for periodic irqs; fixed rate of 1/HZ */ - irqmask = AT91_ST_PITS; - regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH); - break; - case 
CLOCK_EVT_MODE_ONESHOT: - /* ALM for oneshot irqs, set by next_event() - * before 32 seconds have passed - */ - irqmask = AT91_ST_ALMS; - regmap_write(regmap_st, AT91_ST_RTAR, last_crtr); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - irqmask = 0; - break; - } +} + +static int clkevt32k_shutdown(struct clock_event_device *evt) +{ + clkdev32k_disable_and_flush_irq(); + irqmask = 0; + regmap_write(regmap_st, AT91_ST_IER, irqmask); + return 0; +} + +static int clkevt32k_set_oneshot(struct clock_event_device *dev) +{ + clkdev32k_disable_and_flush_irq(); + + /* + * ALM for oneshot irqs, set by next_event() + * before 32 seconds have passed. + */ + irqmask = AT91_ST_ALMS; + regmap_write(regmap_st, AT91_ST_RTAR, last_crtr); regmap_write(regmap_st, AT91_ST_IER, irqmask); + return 0; +} + +static int clkevt32k_set_periodic(struct clock_event_device *dev) +{ + clkdev32k_disable_and_flush_irq(); + + /* PIT for periodic irqs; fixed rate of 1/HZ */ + irqmask = AT91_ST_PITS; + regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH); + regmap_write(regmap_st, AT91_ST_IER, irqmask); + return 0; } static int @@ -170,11 +181,15 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) } static struct clock_event_device clkevt = { - .name = "at91_tick", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 150, - .set_next_event = clkevt32k_next_event, - .set_mode = clkevt32k_mode, + .name = "at91_tick", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 150, + .set_next_event = clkevt32k_next_event, + .set_state_shutdown = clkevt32k_shutdown, + .set_state_periodic = clkevt32k_set_periodic, + .set_state_oneshot = clkevt32k_set_oneshot, + .tick_resume = clkevt32k_shutdown, }; /* diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c index 7f8388cfa..a536eeb63 100644 --- a/drivers/clocksource/timer-digicolor.c +++ b/drivers/clocksource/timer-digicolor.c @@ -87,27 +87,27 @@ static inline void dc_timer_set_count(struct clock_event_device *ce, writel(count, dt->base + COUNT(dt->timer_id)); } -static void digicolor_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *ce) +static int digicolor_clkevt_shutdown(struct clock_event_device *ce) +{ + dc_timer_disable(ce); + return 0; +} + +static int digicolor_clkevt_set_oneshot(struct clock_event_device *ce) +{ + dc_timer_disable(ce); + dc_timer_enable(ce, CONTROL_MODE_ONESHOT); + return 0; +} + +static int digicolor_clkevt_set_periodic(struct clock_event_device *ce) { struct digicolor_timer *dt = dc_timer(ce); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - dc_timer_disable(ce); - dc_timer_set_count(ce, dt->ticks_per_jiffy); - dc_timer_enable(ce, CONTROL_MODE_PERIODIC); - break; - case CLOCK_EVT_MODE_ONESHOT: - dc_timer_disable(ce); - dc_timer_enable(ce, CONTROL_MODE_ONESHOT); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - dc_timer_disable(ce); - break; - } + dc_timer_disable(ce); + dc_timer_set_count(ce, dt->ticks_per_jiffy); + dc_timer_enable(ce, CONTROL_MODE_PERIODIC); + return 0; } static int digicolor_clkevt_next_event(unsigned long evt, @@ -125,7 +125,10 @@ static struct digicolor_timer dc_timer_dev = { .name = "digicolor_tick", .rating = 340, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = digicolor_clkevt_mode, + .set_state_shutdown = digicolor_clkevt_shutdown, + .set_state_periodic = digicolor_clkevt_set_periodic, + 
.set_state_oneshot = digicolor_clkevt_set_oneshot, + .tick_resume = digicolor_clkevt_shutdown, .set_next_event = digicolor_clkevt_next_event, }, .timer_id = TIMER_C, @@ -140,7 +143,7 @@ static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static u64 digicolor_timer_sched_read(void) +static u64 notrace digicolor_timer_sched_read(void) { return ~readl(dc_timer_dev.base + COUNT(TIMER_B)); } diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c index 86c7eb66b..839aba92f 100644 --- a/drivers/clocksource/timer-imx-gpt.c +++ b/drivers/clocksource/timer-imx-gpt.c @@ -83,7 +83,6 @@ struct imx_timer { struct clk *clk_ipg; const struct imx_gpt_data *gpt; struct clock_event_device ced; - enum clock_event_mode cem; struct irqaction act; }; @@ -212,18 +211,38 @@ static int v2_set_next_event(unsigned long evt, -ETIME : 0; } +static int mxc_shutdown(struct clock_event_device *ced) +{ + struct imx_timer *imxtm = to_imx_timer(ced); + unsigned long flags; + u32 tcn; + + /* + * The timer interrupt generation is disabled at least + * for enough time to call mxc_set_next_event() + */ + local_irq_save(flags); + + /* Disable interrupt in GPT module */ + imxtm->gpt->gpt_irq_disable(imxtm); + + tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn); + /* Set event time into far-far future */ + writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp); + + /* Clear pending interrupt */ + imxtm->gpt->gpt_irq_acknowledge(imxtm); + #ifdef DEBUG -static const char *clock_event_mode_label[] = { - [CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC", - [CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT", - [CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN", - [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED", - [CLOCK_EVT_MODE_RESUME] = "CLOCK_EVT_MODE_RESUME", -}; + printk(KERN_INFO "%s: changing mode\n", __func__); #endif /* DEBUG */ -static void mxc_set_mode(enum clock_event_mode mode, - struct clock_event_device *ced) + local_irq_restore(flags); + + return 0; +} + +static int mxc_set_oneshot(struct clock_event_device *ced) { struct imx_timer *imxtm = to_imx_timer(ced); unsigned long flags; @@ -237,7 +256,7 @@ static void mxc_set_mode(enum clock_event_mode mode, /* Disable interrupt in GPT module */ imxtm->gpt->gpt_irq_disable(imxtm); - if (mode != imxtm->cem) { + if (!clockevent_state_oneshot(ced)) { u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn); /* Set event time into far-far future */ writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp); @@ -247,37 +266,19 @@ static void mxc_set_mode(enum clock_event_mode mode, } #ifdef DEBUG - printk(KERN_INFO "mxc_set_mode: changing mode from %s to %s\n", - clock_event_mode_label[imxtm->cem], - clock_event_mode_label[mode]); + printk(KERN_INFO "%s: changing mode\n", __func__); #endif /* DEBUG */ - /* Remember timer mode */ - imxtm->cem = mode; - local_irq_restore(flags); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - printk(KERN_ERR"mxc_set_mode: Periodic mode is not " - "supported for i.MX\n"); - break; - case CLOCK_EVT_MODE_ONESHOT: /* * Do not put overhead of interrupt enable/disable into * mxc_set_next_event(), the core has about 4 minutes * to call mxc_set_next_event() or shutdown clock after * mode switching */ - local_irq_save(flags); - imxtm->gpt->gpt_irq_enable(imxtm); - local_irq_restore(flags); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - /* Left event sources disabled, no more interrupts appear */ - break; - 
} + imxtm->gpt->gpt_irq_enable(imxtm); + local_irq_restore(flags); + + return 0; } /* @@ -303,11 +304,11 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm) struct clock_event_device *ced = &imxtm->ced; struct irqaction *act = &imxtm->act; - imxtm->cem = CLOCK_EVT_MODE_UNUSED; - ced->name = "mxc_timer1"; ced->features = CLOCK_EVT_FEAT_ONESHOT; - ced->set_mode = mxc_set_mode; + ced->set_state_shutdown = mxc_shutdown; + ced->set_state_oneshot = mxc_set_oneshot; + ced->tick_resume = mxc_shutdown; ced->set_next_event = imxtm->gpt->set_next_event; ced->rating = 200; ced->cpumask = cpumask_of(0); diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index a68866e0e..3f59ac218 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -75,33 +75,37 @@ static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) +static int clkevt_shutdown(struct clock_event_device *evt) { u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE; /* Disable timer */ writel(ctrl, clkevt_base + TIMER_CTRL); + return 0; +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* Enable the timer and start the periodic tick */ - writel(timer_reload, clkevt_base + TIMER_LOAD); - ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; - writel(ctrl, clkevt_base + TIMER_CTRL); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* Leave the timer disabled, .set_next_event will enable it */ - ctrl &= ~TIMER_CTRL_PERIODIC; - writel(ctrl, clkevt_base + TIMER_CTRL); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - default: - /* Just leave in disabled state */ - break; - } +static int clkevt_set_oneshot(struct clock_event_device *evt) +{ + u32 ctrl = readl(clkevt_base + TIMER_CTRL) & + ~(TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC); + + /* Leave the timer disabled, .set_next_event will enable it */ + writel(ctrl, clkevt_base + TIMER_CTRL); + return 0; +} +static int clkevt_set_periodic(struct clock_event_device *evt) +{ + u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE; + + /* Disable timer */ + writel(ctrl, clkevt_base + TIMER_CTRL); + + /* Enable the timer and start the periodic tick */ + writel(timer_reload, clkevt_base + TIMER_LOAD); + ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; + writel(ctrl, clkevt_base + TIMER_CTRL); + return 0; } static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt) @@ -116,11 +120,15 @@ static int clkevt_set_next_event(unsigned long next, struct clock_event_device * } static struct clock_event_device integrator_clockevent = { - .name = "timer1", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = clkevt_set_mode, - .set_next_event = clkevt_set_next_event, - .rating = 300, + .name = "timer1", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = clkevt_shutdown, + .set_state_periodic = clkevt_set_periodic, + .set_state_oneshot = clkevt_set_oneshot, + .tick_resume = clkevt_shutdown, + .set_next_event = clkevt_set_next_event, + .rating = 300, }; static struct irqaction integrator_timer_irq = { diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index 0250354f7..1cea08cf6 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -72,10 +72,10 @@ static inline 
void keystone_timer_barrier(void) /** * keystone_timer_config: configures timer to work in oneshot/periodic modes. - * @ mode: mode to configure + * @ mask: mask of the mode to configure * @ period: cycles number to configure for */ -static int keystone_timer_config(u64 period, enum clock_event_mode mode) +static int keystone_timer_config(u64 period, int mask) { u32 tcr; u32 off; @@ -84,16 +84,7 @@ static int keystone_timer_config(u64 period, enum clock_event_mode mode) off = tcr & ~(TCR_ENAMODE_MASK); /* set enable mode */ - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - tcr |= TCR_ENAMODE_ONESHOT_MASK; - break; - case CLOCK_EVT_MODE_PERIODIC: - tcr |= TCR_ENAMODE_PERIODIC_MASK; - break; - default: - return -1; - } + tcr |= mask; /* disable timer */ keystone_timer_writel(off, TCR); @@ -138,24 +129,19 @@ static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id) static int keystone_set_next_event(unsigned long cycles, struct clock_event_device *evt) { - return keystone_timer_config(cycles, evt->mode); + return keystone_timer_config(cycles, TCR_ENAMODE_ONESHOT_MASK); } -static void keystone_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int keystone_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_ONESHOT: - keystone_timer_disable(); - break; - default: - break; - } + keystone_timer_disable(); + return 0; +} + +static int keystone_set_periodic(struct clock_event_device *evt) +{ + keystone_timer_config(timer.hz_period, TCR_ENAMODE_PERIODIC_MASK); + return 0; } static void __init keystone_timer_init(struct device_node *np) @@ -166,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } @@ -222,7 +208,9 @@ static void __init keystone_timer_init(struct device_node *np) /* setup clockevent */ event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; event_dev->set_next_event = keystone_set_next_event; - event_dev->set_mode = keystone_set_mode; + event_dev->set_state_shutdown = keystone_shutdown; + event_dev->set_state_periodic = keystone_set_periodic; + event_dev->set_state_oneshot = keystone_shutdown; event_dev->cpumask = cpu_all_mask; event_dev->owner = THIS_MODULE; event_dev->name = TIMER_NAME; diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index ce18d570e..2854c663e 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -73,7 +73,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) } /* read 64-bit timer counter */ -static cycle_t sirfsoc_timer_read(struct clocksource *cs) +static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs) { u64 cycles; @@ -104,26 +104,21 @@ static int sirfsoc_timer_set_next_event(unsigned long delta, return next - now > delta ? 
-ETIME : 0; } -static void sirfsoc_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *ce) +static int sirfsoc_timer_shutdown(struct clock_event_device *evt) { u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - WARN_ON(1); - break; - case CLOCK_EVT_MODE_ONESHOT: - writel_relaxed(val | BIT(0), - sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - writel_relaxed(val & ~BIT(0), - sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - break; - } + + writel_relaxed(val & ~BIT(0), + sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); + return 0; +} + +static int sirfsoc_timer_set_oneshot(struct clock_event_device *evt) +{ + u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); + + writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); + return 0; } static void sirfsoc_clocksource_suspend(struct clocksource *cs) @@ -157,7 +152,8 @@ static struct clock_event_device sirfsoc_clockevent = { .name = "sirfsoc_clockevent", .rating = 200, .features = CLOCK_EVT_FEAT_ONESHOT, - .set_mode = sirfsoc_timer_set_mode, + .set_state_shutdown = sirfsoc_timer_shutdown, + .set_state_oneshot = sirfsoc_timer_set_oneshot, .set_next_event = sirfsoc_timer_set_next_event, }; diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c index ca02503f1..5f45b9ade 100644 --- a/drivers/clocksource/timer-sp804.c +++ b/drivers/clocksource/timer-sp804.c @@ -133,50 +133,50 @@ static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void sp804_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static inline void timer_shutdown(struct clock_event_device *evt) { - unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE; + writel(0, clkevt_base + TIMER_CTRL); +} - writel(ctrl, clkevt_base + TIMER_CTRL); +static int sp804_shutdown(struct clock_event_device *evt) +{ + timer_shutdown(evt); + return 0; +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - writel(clkevt_reload, clkevt_base + TIMER_LOAD); - ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* period set, and timer enabled in 'next_event' hook */ - ctrl |= TIMER_CTRL_ONESHOT; - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - break; - } +static int sp804_set_periodic(struct clock_event_device *evt) +{ + unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE | + TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; + timer_shutdown(evt); + writel(clkevt_reload, clkevt_base + TIMER_LOAD); writel(ctrl, clkevt_base + TIMER_CTRL); + return 0; } static int sp804_set_next_event(unsigned long next, struct clock_event_device *evt) { - unsigned long ctrl = readl(clkevt_base + TIMER_CTRL); + unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE | + TIMER_CTRL_ONESHOT | TIMER_CTRL_ENABLE; writel(next, clkevt_base + TIMER_LOAD); - writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL); + writel(ctrl, clkevt_base + TIMER_CTRL); return 0; } static struct clock_event_device sp804_clockevent = { - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_DYNIRQ, - .set_mode = sp804_set_mode, - .set_next_event = sp804_set_next_event, - .rating = 300, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DYNIRQ, + .set_state_shutdown = sp804_shutdown, + .set_state_periodic = 
sp804_set_periodic, + .set_state_oneshot = sp804_shutdown, + .tick_resume = sp804_shutdown, + .set_next_event = sp804_set_next_event, + .rating = 300, }; static struct irqaction sp804_timer_irq = { diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index a97e8b507..f3dcb7679 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c @@ -40,24 +40,25 @@ struct stm32_clock_event_ddata { void __iomem *base; }; -static void stm32_clock_event_set_mode(enum clock_event_mode mode, - struct clock_event_device *evtdev) +static int stm32_clock_event_shutdown(struct clock_event_device *evtdev) { struct stm32_clock_event_ddata *data = container_of(evtdev, struct stm32_clock_event_ddata, evtdev); void *base = data->base; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - writel_relaxed(data->periodic_top, base + TIM_ARR); - writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1); - break; + writel_relaxed(0, base + TIM_CR1); + return 0; +} - case CLOCK_EVT_MODE_ONESHOT: - default: - writel_relaxed(0, base + TIM_CR1); - break; - } +static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev) +{ + struct stm32_clock_event_ddata *data = + container_of(evtdev, struct stm32_clock_event_ddata, evtdev); + void *base = data->base; + + writel_relaxed(data->periodic_top, base + TIM_ARR); + writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1); + return 0; } static int stm32_clock_event_set_next_event(unsigned long evt, @@ -88,7 +89,10 @@ static struct stm32_clock_event_ddata clock_event_ddata = { .evtdev = { .name = "stm32 clockevent", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .set_mode = stm32_clock_event_set_mode, + .set_state_shutdown = stm32_clock_event_shutdown, + .set_state_periodic = stm32_clock_event_set_periodic, + .set_state_oneshot = stm32_clock_event_shutdown, + .tick_resume = stm32_clock_event_shutdown, .set_next_event = stm32_clock_event_set_next_event, .rating = 200, }, diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 0ffb4ea7c..bca9573e0 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -103,27 +103,31 @@ static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, boo ce->timer.base + TIMER_CTL_REG(timer)); } -static void sun5i_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *clkevt) +static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt) { struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - sun5i_clkevt_time_stop(ce, 0); - sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy); - sun5i_clkevt_time_start(ce, 0, true); - break; - case CLOCK_EVT_MODE_ONESHOT: - sun5i_clkevt_time_stop(ce, 0); - sun5i_clkevt_time_start(ce, 0, false); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - sun5i_clkevt_time_stop(ce, 0); - break; - } + sun5i_clkevt_time_stop(ce, 0); + return 0; +} + +static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt) +{ + struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt); + + sun5i_clkevt_time_stop(ce, 0); + sun5i_clkevt_time_start(ce, 0, false); + return 0; +} + +static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt) +{ + struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt); + + sun5i_clkevt_time_stop(ce, 0); + sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy); + sun5i_clkevt_time_start(ce, 0, 
true); + return 0; } static int sun5i_clkevt_next_event(unsigned long evt, @@ -286,7 +290,10 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem ce->clkevt.name = node->name; ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; ce->clkevt.set_next_event = sun5i_clkevt_next_event; - ce->clkevt.set_mode = sun5i_clkevt_mode; + ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown; + ce->clkevt.set_state_periodic = sun5i_clkevt_set_periodic; + ce->clkevt.set_state_oneshot = sun5i_clkevt_set_oneshot; + ce->clkevt.tick_resume = sun5i_clkevt_shutdown; ce->clkevt.rating = 340; ce->clkevt.irq = irq; ce->clkevt.cpumask = cpu_possible_mask; diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c index 5dcf75697..1744b2438 100644 --- a/drivers/clocksource/timer-u300.c +++ b/drivers/clocksource/timer-u300.c @@ -187,85 +187,82 @@ struct u300_clockevent_data { unsigned ticks_per_jiffy; }; +static int u300_shutdown(struct clock_event_device *evt) +{ + /* Disable interrupts on GP1 */ + writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, + u300_timer_base + U300_TIMER_APP_GPT1IE); + /* Disable GP1 */ + writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, + u300_timer_base + U300_TIMER_APP_DGPT1); + return 0; +} + /* - * The u300_set_mode() function is always called first, if we - * have oneshot timer active, the oneshot scheduling function + * If we have oneshot timer active, the oneshot scheduling function * u300_set_next_event() is called immediately after. */ -static void u300_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int u300_set_oneshot(struct clock_event_device *evt) +{ + /* Just return; here? */ + /* + * The actual event will be programmed by the next event hook, + * so we just set a dummy value somewhere at the end of the + * universe here. + */ + /* Disable interrupts on GPT1 */ + writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, + u300_timer_base + U300_TIMER_APP_GPT1IE); + /* Disable GP1 while we're reprogramming it. */ + writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, + u300_timer_base + U300_TIMER_APP_DGPT1); + /* + * Expire far in the future, u300_set_next_event() will be + * called soon... + */ + writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC); + /* We run one shot per tick here! */ + writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT, + u300_timer_base + U300_TIMER_APP_SGPT1M); + /* Enable interrupts for this timer */ + writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE, + u300_timer_base + U300_TIMER_APP_GPT1IE); + /* Enable timer */ + writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE, + u300_timer_base + U300_TIMER_APP_EGPT1); + return 0; +} + +static int u300_set_periodic(struct clock_event_device *evt) { struct u300_clockevent_data *cevdata = container_of(evt, struct u300_clockevent_data, cevd); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* Disable interrupts on GPT1 */ - writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, - u300_timer_base + U300_TIMER_APP_GPT1IE); - /* Disable GP1 while we're reprogramming it. */ - writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, - u300_timer_base + U300_TIMER_APP_DGPT1); - /* - * Set the periodic mode to a certain number of ticks per - * jiffy. - */ - writel(cevdata->ticks_per_jiffy, - u300_timer_base + U300_TIMER_APP_GPT1TC); - /* - * Set continuous mode, so the timer keeps triggering - * interrupts. 
- */ - writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS, - u300_timer_base + U300_TIMER_APP_SGPT1M); - /* Enable timer interrupts */ - writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE, - u300_timer_base + U300_TIMER_APP_GPT1IE); - /* Then enable the OS timer again */ - writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE, - u300_timer_base + U300_TIMER_APP_EGPT1); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* Just break; here? */ - /* - * The actual event will be programmed by the next event hook, - * so we just set a dummy value somewhere at the end of the - * universe here. - */ - /* Disable interrupts on GPT1 */ - writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, - u300_timer_base + U300_TIMER_APP_GPT1IE); - /* Disable GP1 while we're reprogramming it. */ - writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, - u300_timer_base + U300_TIMER_APP_DGPT1); - /* - * Expire far in the future, u300_set_next_event() will be - * called soon... - */ - writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC); - /* We run one shot per tick here! */ - writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT, - u300_timer_base + U300_TIMER_APP_SGPT1M); - /* Enable interrupts for this timer */ - writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE, - u300_timer_base + U300_TIMER_APP_GPT1IE); - /* Enable timer */ - writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE, - u300_timer_base + U300_TIMER_APP_EGPT1); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - /* Disable interrupts on GP1 */ - writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, - u300_timer_base + U300_TIMER_APP_GPT1IE); - /* Disable GP1 */ - writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, - u300_timer_base + U300_TIMER_APP_DGPT1); - break; - case CLOCK_EVT_MODE_RESUME: - /* Ignore this call */ - break; - } + /* Disable interrupts on GPT1 */ + writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE, + u300_timer_base + U300_TIMER_APP_GPT1IE); + /* Disable GP1 while we're reprogramming it. */ + writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE, + u300_timer_base + U300_TIMER_APP_DGPT1); + /* + * Set the periodic mode to a certain number of ticks per + * jiffy. + */ + writel(cevdata->ticks_per_jiffy, + u300_timer_base + U300_TIMER_APP_GPT1TC); + /* + * Set continuous mode, so the timer keeps triggering + * interrupts. 
+ */ + writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS, + u300_timer_base + U300_TIMER_APP_SGPT1M); + /* Enable timer interrupts */ + writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE, + u300_timer_base + U300_TIMER_APP_GPT1IE); + /* Then enable the OS timer again */ + writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE, + u300_timer_base + U300_TIMER_APP_EGPT1); + return 0; } /* @@ -309,13 +306,15 @@ static int u300_set_next_event(unsigned long cycles, static struct u300_clockevent_data u300_clockevent_data = { /* Use general purpose timer 1 as clock event */ .cevd = { - .name = "GPT1", + .name = "GPT1", /* Reasonably fast and accurate clock event */ - .rating = 300, - .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = u300_set_next_event, - .set_mode = u300_set_mode, + .rating = 300, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = u300_set_next_event, + .set_state_shutdown = u300_shutdown, + .set_state_periodic = u300_set_periodic, + .set_state_oneshot = u300_set_oneshot, }, }; diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c index b45ac6229..a0e6c6853 100644 --- a/drivers/clocksource/vf_pit_timer.c +++ b/drivers/clocksource/vf_pit_timer.c @@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void) __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); } -static u64 pit_read_sched_clock(void) +static u64 notrace pit_read_sched_clock(void) { return ~__raw_readl(clksrc_base + PITCVAL); } @@ -86,20 +86,16 @@ static int pit_set_next_event(unsigned long delta, return 0; } -static void pit_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int pit_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - pit_set_next_event(cycle_per_jiffy, evt); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - pit_timer_disable(); - break; - default: - break; - } + pit_timer_disable(); + return 0; +} + +static int pit_set_periodic(struct clock_event_device *evt) +{ + pit_set_next_event(cycle_per_jiffy, evt); + return 0; } static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) @@ -114,7 +110,7 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) * and start the counter again. So software need to disable the timer * to stop the counter loop in ONESHOT mode. 
*/ - if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) + if (likely(clockevent_state_oneshot(evt))) pit_timer_disable(); evt->event_handler(evt); @@ -125,7 +121,8 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) static struct clock_event_device clockevent_pit = { .name = "VF pit timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = pit_set_mode, + .set_state_shutdown = pit_shutdown, + .set_state_periodic = pit_set_periodic, .set_next_event = pit_set_next_event, .rating = 300, }; diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index 1098ed3b9..a92e94b40 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -88,29 +88,20 @@ static int vt8500_timer_set_next_event(unsigned long cycles, return 0; } -static void vt8500_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int vt8500_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_PERIODIC: - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - writel(readl(regbase + TIMER_CTRL_VAL) | 1, - regbase + TIMER_CTRL_VAL); - writel(0, regbase + TIMER_IER_VAL); - break; - } + writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL); + writel(0, regbase + TIMER_IER_VAL); + return 0; } static struct clock_event_device clockevent = { - .name = "vt8500_timer", - .features = CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_next_event = vt8500_timer_set_next_event, - .set_mode = vt8500_timer_set_mode, + .name = "vt8500_timer", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = vt8500_timer_set_next_event, + .set_state_shutdown = vt8500_shutdown, + .set_state_oneshot = vt8500_shutdown, }; static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id) diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index 7ce442148..ceaa6133f 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c @@ -76,32 +76,28 @@ static int zevio_timer_set_event(unsigned long delta, return 0; } -static void zevio_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int zevio_timer_shutdown(struct clock_event_device *dev) { struct zevio_timer *timer = container_of(dev, struct zevio_timer, clkevt); - switch (mode) { - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_ONESHOT: - /* Enable timer interrupts */ - writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK); - writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - /* Disable timer interrupts */ - writel(0, timer->interrupt_regs + IO_INTR_MSK); - writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); - /* Stop timer */ - writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); - break; - case CLOCK_EVT_MODE_PERIODIC: - default: - /* Unsupported */ - break; - } + /* Disable timer interrupts */ + writel(0, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK); + /* Stop timer */ + writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL); + return 0; +} + +static int zevio_timer_set_oneshot(struct clock_event_device *dev) +{ + struct zevio_timer *timer = container_of(dev, struct zevio_timer, + clkevt); + + /* Enable timer interrupts */ + writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK); + writel(TIMER_INTR_ALL, 
timer->interrupt_regs + IO_INTR_ACK); + return 0; } static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id) @@ -162,7 +158,9 @@ static int __init zevio_timer_add(struct device_node *node) if (timer->interrupt_regs && irqnr) { timer->clkevt.name = timer->clockevent_name; timer->clkevt.set_next_event = zevio_timer_set_event; - timer->clkevt.set_mode = zevio_timer_set_mode; + timer->clkevt.set_state_shutdown = zevio_timer_shutdown; + timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot; + timer->clkevt.tick_resume = zevio_timer_set_oneshot; timer->clkevt.rating = 200; timer->clkevt.cpumask = cpu_all_mask; timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index cc8a71c26..cd0391e46 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -24,55 +24,6 @@ config ARM_VEXPRESS_SPC_CPUFREQ This add the CPUfreq driver support for Versatile Express big.LITTLE platforms using SPC for power management. - -config ARM_EXYNOS_CPUFREQ - tristate "SAMSUNG EXYNOS CPUfreq Driver" - depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250 - depends on THERMAL - help - This adds the CPUFreq driver for Samsung EXYNOS platforms. - Supported SoC versions are: - Exynos4210, Exynos4212, Exynos4412, and Exynos5250. - - If in doubt, say N. - -config ARM_EXYNOS4X12_CPUFREQ - bool "SAMSUNG EXYNOS4x12" - depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 - depends on ARM_EXYNOS_CPUFREQ - default y - help - This adds the CPUFreq driver for Samsung EXYNOS4X12 - SoC (EXYNOS4212 or EXYNOS4412). - - If in doubt, say N. - -config ARM_EXYNOS5250_CPUFREQ - bool "SAMSUNG EXYNOS5250" - depends on SOC_EXYNOS5250 - depends on ARM_EXYNOS_CPUFREQ - default y - help - This adds the CPUFreq driver for Samsung EXYNOS5250 - SoC. - - If in doubt, say N. - -config ARM_EXYNOS_CPU_FREQ_BOOST_SW - bool "EXYNOS Frequency Overclocking - Software" - depends on ARM_EXYNOS_CPUFREQ && THERMAL - select CPU_FREQ_BOOST_SW - select EXYNOS_THERMAL - help - This driver supports software managed overclocking (BOOST). - It allows usage of special frequencies for Samsung Exynos - processors if thermal conditions are appropriate. - - It requires, for safe operation, thermal framework with properly - defined trip points. - - If in doubt, say N. - config ARM_EXYNOS5440_CPUFREQ tristate "SAMSUNG EXYNOS5440" depends on SOC_EXYNOS5440 @@ -130,6 +81,14 @@ config ARM_KIRKWOOD_CPUFREQ This adds the CPUFreq driver for Marvell Kirkwood SoCs. +config ARM_MT8173_CPUFREQ + bool "Mediatek MT8173 CPUFreq support" + depends on ARCH_MEDIATEK && REGULATOR + depends on !CPU_THERMAL || THERMAL=y + select PM_OPP + help + This adds the CPUFreq driver support for Mediatek MT8173 SoC. + config ARM_OMAP2PLUS_CPUFREQ bool "TI OMAP2+" depends on ARCH_OMAP2PLUS @@ -247,12 +206,19 @@ config ARM_SPEAR_CPUFREQ help This adds the CPUFreq driver support for SPEAr SOCs. -config ARM_TEGRA_CPUFREQ - bool "TEGRA CPUFreq support" +config ARM_TEGRA20_CPUFREQ + bool "Tegra20 CPUFreq support" depends on ARCH_TEGRA default y help - This adds the CPUFreq driver support for TEGRA SOCs. + This adds the CPUFreq driver support for Tegra20 SOCs. + +config ARM_TEGRA124_CPUFREQ + tristate "Tegra124 CPUFreq support" + depends on ARCH_TEGRA && CPUFREQ_DT + default y + help + This adds the CPUFreq driver support for Tegra124 SOCs. 
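Every clocksource hunk above follows the same recipe: the legacy set_mode() callback, which multiplexed all transitions through one enum clock_event_mode switch, is split into per-state hooks (set_state_shutdown, set_state_periodic, set_state_oneshot, tick_resume), each returning an int so the core can propagate failures; where oneshot programming already happens in set_next_event(), the shutdown handler simply doubles as the oneshot hook. The scattered notrace annotations (sirfsoc_timer_read, pit_read_sched_clock) are a separate fix: those read callbacks sit on the sched_clock/ftrace path and must not themselves be traced. A minimal sketch of the post-conversion shape — my_timer_stop(), my_timer_program() and ticks_per_jiffy are hypothetical stand-ins, not taken from any driver above:

#include <linux/clockchips.h>

static unsigned long ticks_per_jiffy;	/* hypothetical, computed at init */

static void my_timer_stop(void)
{
	/* hypothetical helper: mask the timer IRQ and halt the counter */
}

static void my_timer_program(unsigned long cycles, bool periodic)
{
	/* hypothetical helper: load 'cycles' and start in the given mode */
}

static int my_timer_shutdown(struct clock_event_device *evt)
{
	my_timer_stop();
	return 0;
}

static int my_timer_set_periodic(struct clock_event_device *evt)
{
	my_timer_stop();
	my_timer_program(ticks_per_jiffy, true);
	return 0;
}

static int my_timer_set_next_event(unsigned long cycles,
				   struct clock_event_device *evt)
{
	my_timer_program(cycles, false);
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name			= "my_timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 200,
	.set_next_event		= my_timer_set_next_event,
	.set_state_shutdown	= my_timer_shutdown,
	.set_state_periodic	= my_timer_set_periodic,
	/* oneshot is armed by set_next_event(), so stopping is enough here */
	.set_state_oneshot	= my_timer_shutdown,
	.tick_resume		= my_timer_shutdown,
};

Interrupt handlers that previously tested evt->mode now go through the clockevent_state_oneshot()/clockevent_state_periodic() accessors instead, as the vf_pit hunk shows.
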
config ARM_PXA2xx_CPUFREQ tristate "Intel PXA2xx CPUfreq driver" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 2169bf792..41340384f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -52,16 +52,13 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o -arm-exynos-cpufreq-y := exynos-cpufreq.o -arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o -arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o +obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o @@ -76,7 +73,8 @@ obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o -obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o +obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o +obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o ################################################################################## diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 7c2a7385c..cec1ee2d2 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -65,18 +65,21 @@ enum { #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) struct acpi_cpufreq_data { - struct acpi_processor_performance *acpi_data; struct cpufreq_frequency_table *freq_table; unsigned int resume; unsigned int cpu_feature; + unsigned int acpi_perf_cpu; cpumask_var_t freqdomain_cpus; }; -static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); - /* acpi_perf_data is a pointer to percpu data. 
*/ static struct acpi_processor_performance __percpu *acpi_perf_data; +static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data) +{ + return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu); +} + static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; @@ -144,7 +147,7 @@ static int _store_boost(int val) static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + struct acpi_cpufreq_data *data = policy->driver_data; if (unlikely(!data)) return -ENODEV; @@ -205,7 +208,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) struct acpi_processor_performance *perf; int i; - perf = data->acpi_data; + perf = to_perf_data(data); for (i = 0; i < perf->state_count; i++) { if (value == perf->states[i].status) @@ -224,7 +227,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) else msr &= INTEL_MSR_RANGE; - perf = data->acpi_data; + perf = to_perf_data(data); cpufreq_for_each_entry(pos, data->freq_table) if (msr == perf->states[pos->driver_data].status) @@ -330,7 +333,8 @@ static void drv_write(struct drv_cmd *cmd) put_cpu(); } -static u32 get_cur_val(const struct cpumask *mask) +static u32 +get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data) { struct acpi_processor_performance *perf; struct drv_cmd cmd; @@ -338,7 +342,7 @@ static u32 get_cur_val(const struct cpumask *mask) if (unlikely(cpumask_empty(mask))) return 0; - switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { + switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_CTL; @@ -349,7 +353,7 @@ static u32 get_cur_val(const struct cpumask *mask) break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; - perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; + perf = to_perf_data(data); cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; break; @@ -367,19 +371,23 @@ static u32 get_cur_val(const struct cpumask *mask) static unsigned int get_cur_freq_on_cpu(unsigned int cpu) { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); + struct acpi_cpufreq_data *data; + struct cpufreq_policy *policy; unsigned int freq; unsigned int cached_freq; pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); - if (unlikely(data == NULL || - data->acpi_data == NULL || data->freq_table == NULL)) { + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) return 0; - } - cached_freq = data->freq_table[data->acpi_data->state].frequency; - freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); + data = policy->driver_data; + if (unlikely(!data || !data->freq_table)) + return 0; + + cached_freq = data->freq_table[to_perf_data(data)->state].frequency; + freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. 
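The acpi-cpufreq hunks above retire the driver's own per-CPU pointer, per_cpu(acfreq_data, cpu), in favour of the core-provided policy->driver_data. The private data now keeps only the owning CPU number (acpi_perf_cpu), and the per-CPU performance table is reached through the new to_perf_data() accessor; helpers such as get_cur_val() take the data explicitly rather than peeking at the per-CPU slot of cpumask_first(mask). A reduced sketch of the pattern — struct my_perf_state and the my_* functions are hypothetical, standing in for the ACPI-specific types:

#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_perf_state {
	unsigned int state;		/* currently programmed P-state */
};

static DEFINE_PER_CPU(struct my_perf_state, my_perf_data);

struct my_driver_data {
	unsigned int perf_cpu;		/* CPU owning our per-CPU perf slot */
};

static inline struct my_perf_state *to_perf(struct my_driver_data *data)
{
	return per_cpu_ptr(&my_perf_data, data->perf_cpu);
}

static int my_cpu_init(struct cpufreq_policy *policy)
{
	struct my_driver_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	data->perf_cpu = policy->cpu;
	policy->driver_data = data;	/* replaces per_cpu(acfreq_data, cpu) */
	return 0;
}

static int my_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct my_driver_data *data = policy->driver_data;
	struct my_perf_state *perf = to_perf(data);

	/* ... program the hardware for 'index' ... */
	perf->state = index;
	return 0;
}

Routing every lookup through the policy is also what lets get_cur_freq_on_cpu() bail out safely via cpufreq_cpu_get_raw() when no policy exists yet, instead of dereferencing a possibly stale per-CPU pointer.
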
@@ -400,7 +408,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, unsigned int i; for (i = 0; i < 100; i++) { - cur_freq = extract_freq(get_cur_val(mask), data); + cur_freq = extract_freq(get_cur_val(mask, data), data); if (cur_freq == freq) return 1; udelay(10); @@ -411,18 +419,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + struct acpi_cpufreq_data *data = policy->driver_data; struct acpi_processor_performance *perf; struct drv_cmd cmd; unsigned int next_perf_state = 0; /* Index into perf table */ int result = 0; - if (unlikely(data == NULL || - data->acpi_data == NULL || data->freq_table == NULL)) { + if (unlikely(data == NULL || data->freq_table == NULL)) { return -ENODEV; } - perf = data->acpi_data; + perf = to_perf_data(data); next_perf_state = data->freq_table[index].driver_data; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { @@ -485,8 +492,9 @@ out: static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) { - struct acpi_processor_performance *perf = data->acpi_data; + struct acpi_processor_performance *perf; + perf = to_perf_data(data); if (cpu_khz) { /* search the closest match to cpu_khz */ unsigned int i; @@ -675,17 +683,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_free; } - data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); - per_cpu(acfreq_data, cpu) = data; + perf = per_cpu_ptr(acpi_perf_data, cpu); + data->acpi_perf_cpu = cpu; + policy->driver_data = data; if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; - result = acpi_processor_register_performance(data->acpi_data, cpu); + result = acpi_processor_register_performance(perf, cpu); if (result) goto err_free_mask; - perf = data->acpi_data; policy->shared_type = perf->shared_type; /* @@ -841,26 +849,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) err_freqfree: kfree(data->freq_table); err_unreg: - acpi_processor_unregister_performance(perf, cpu); + acpi_processor_unregister_performance(cpu); err_free_mask: free_cpumask_var(data->freqdomain_cpus); err_free: kfree(data); - per_cpu(acfreq_data, cpu) = NULL; + policy->driver_data = NULL; return result; } static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + struct acpi_cpufreq_data *data = policy->driver_data; pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { - per_cpu(acfreq_data, policy->cpu) = NULL; - acpi_processor_unregister_performance(data->acpi_data, - policy->cpu); + policy->driver_data = NULL; + acpi_processor_unregister_performance(data->acpi_perf_cpu); free_cpumask_var(data->freqdomain_cpus); kfree(data->freq_table); kfree(data); @@ -871,7 +878,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { - struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + struct acpi_cpufreq_data *data = policy->driver_data; pr_debug("acpi_cpufreq_resume\n"); @@ -883,7 +890,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) static struct freq_attr *acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, &freqdomain_cpus, - NULL, /* this is a placeholder for cpb, do not remove */ +#ifdef 
CONFIG_X86_ACPI_CPUFREQ_CPB + &cpb, +#endif NULL, }; @@ -956,17 +965,16 @@ static int __init acpi_cpufreq_init(void) * only if configured. This is considered legacy code, which * will probably be removed at some point in the future. */ - if (check_amd_hwpstate_cpu(0)) { - struct freq_attr **iter; - - pr_debug("adding sysfs entry for cpb\n"); + if (!check_amd_hwpstate_cpu(0)) { + struct freq_attr **attr; - for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) - ; + pr_debug("CPB unsupported, do not expose it\n"); - /* make sure there is a terminator behind it */ - if (iter[1] == NULL) - *iter = &cpb; + for (attr = acpi_cpufreq_attr; *attr; attr++) + if (*attr == &cpb) { + *attr = NULL; + break; + } } #endif acpi_cpufreq_boost_init(); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 99a406501..7c0d70e2a 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -36,6 +36,12 @@ struct private_data { unsigned int voltage_tolerance; /* in percentage */ }; +static struct freq_attr *cpufreq_dt_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Extra space for boost-attr if required */ + NULL, +}; + static int set_target(struct cpufreq_policy *policy, unsigned int index) { struct dev_pm_opp *opp; @@ -184,15 +190,16 @@ try_again: static int cpufreq_init(struct cpufreq_policy *policy) { - struct cpufreq_dt_platform_data *pd; struct cpufreq_frequency_table *freq_table; struct device_node *np; struct private_data *priv; struct device *cpu_dev; struct regulator *cpu_reg; struct clk *cpu_clk; + struct dev_pm_opp *suspend_opp; unsigned long min_uV = ~0, max_uV = 0; unsigned int transition_latency; + bool need_update = false; int ret; ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); @@ -208,8 +215,30 @@ static int cpufreq_init(struct cpufreq_policy *policy) goto out_put_reg_clk; } - /* OPPs might be populated at runtime, don't check for error here */ - of_init_opp_table(cpu_dev); + /* Get OPP-sharing information from "operating-points-v2" bindings */ + ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus); + if (ret) { + /* + * operating-points-v2 not supported, fallback to old method of + * finding shared-OPPs for backward compatibility. + */ + if (ret == -ENOENT) + need_update = true; + else + goto out_node_put; + } + + /* + * Initialize OPP tables for all policy->cpus. They will be shared by + * all CPUs which have marked their CPUs shared with OPP bindings. + * + * For platforms not using operating-points-v2 bindings, we do this + * before updating policy->cpus. Otherwise, we will end up creating + * duplicate OPPs for policy->cpus. + * + * OPPs might be populated at runtime, don't check for error here + */ + of_cpumask_init_opp_table(policy->cpus); /* * But we need OPP table to function so if it is not there let's @@ -222,6 +251,26 @@ static int cpufreq_init(struct cpufreq_policy *policy) goto out_free_opp; } + if (need_update) { + struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data(); + + if (!pd || !pd->independent_clocks) + cpumask_setall(policy->cpus); + + /* + * OPP tables are initialized only for policy->cpu, do it for + * others as well. 
+ */ + ret = set_cpus_sharing_opps(cpu_dev, policy->cpus); + if (ret) + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", + __func__, ret); + + of_property_read_u32(np, "clock-latency", &transition_latency); + } else { + transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev); + } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; @@ -230,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); - if (of_property_read_u32(np, "clock-latency", &transition_latency)) + if (!transition_latency) transition_latency = CPUFREQ_ETERNAL; if (!IS_ERR(cpu_reg)) { @@ -285,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = priv; policy->clk = cpu_clk; + + rcu_read_lock(); + suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev); + if (suspend_opp) + policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000; + rcu_read_unlock(); + ret = cpufreq_table_validate_and_show(policy, freq_table); if (ret) { dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, @@ -292,11 +348,16 @@ static int cpufreq_init(struct cpufreq_policy *policy) goto out_free_cpufreq_table; } - policy->cpuinfo.transition_latency = transition_latency; + /* Support turbo/boost mode */ + if (policy_has_boost_freq(policy)) { + /* This gets disabled by core on driver unregister */ + ret = cpufreq_enable_boost_support(); + if (ret) + goto out_free_cpufreq_table; + cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + } - pd = cpufreq_get_driver_data(); - if (!pd || !pd->independent_clocks) - cpumask_setall(policy->cpus); + policy->cpuinfo.transition_latency = transition_latency; of_node_put(np); @@ -307,7 +368,8 @@ out_free_cpufreq_table: out_free_priv: kfree(priv); out_free_opp: - of_free_opp_table(cpu_dev); + of_cpumask_free_opp_table(policy->cpus); +out_node_put: of_node_put(np); out_put_reg_clk: clk_put(cpu_clk); @@ -323,7 +385,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - of_free_opp_table(priv->cpu_dev); + of_cpumask_free_opp_table(policy->related_cpus); clk_put(policy->clk); if (!IS_ERR(priv->cpu_reg)) regulator_put(priv->cpu_reg); @@ -368,7 +430,8 @@ static struct cpufreq_driver dt_cpufreq_driver = { .exit = cpufreq_exit, .ready = cpufreq_ready, .name = "cpufreq-dt", - .attr = cpufreq_generic_attr, + .attr = cpufreq_dt_attr, + .suspend = cpufreq_generic_suspend, }; static int dt_cpufreq_probe(struct platform_device *pdev) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7a3c30c43..cc725941a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -112,12 +113,6 @@ static inline bool has_target(void) return cpufreq_driver->target_index || cpufreq_driver->target; } -/* - * rwsem to guarantee that cpufreq driver module doesn't unload during critical - * sections - */ -static DECLARE_RWSEM(cpufreq_rwsem); - /* internal prototypes */ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); @@ -244,13 +239,13 @@ int cpufreq_generic_init(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_generic_init); -/* Only for cpufreq core internal use */ struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); return policy && 
cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; } +EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw); unsigned int cpufreq_generic_get(unsigned int cpu) { @@ -277,10 +272,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get); * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be * freed as that depends on the kobj count. * - * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a - * valid policy is found. This is done to make sure the driver doesn't get - * unregistered while the policy is being used. - * * Return: A valid policy on success, otherwise NULL on failure. */ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) @@ -291,9 +282,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) if (WARN_ON(cpu >= nr_cpu_ids)) return NULL; - if (!down_read_trylock(&cpufreq_rwsem)) - return NULL; - /* get the cpufreq driver */ read_lock_irqsave(&cpufreq_driver_lock, flags); @@ -306,9 +294,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) read_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (!policy) - up_read(&cpufreq_rwsem); - return policy; } EXPORT_SYMBOL_GPL(cpufreq_cpu_get); @@ -320,13 +305,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get); * * This decrements the kobject reference count incremented earlier by calling * cpufreq_cpu_get(). - * - * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get(). */ void cpufreq_cpu_put(struct cpufreq_policy *policy) { kobject_put(&policy->kobj); - up_read(&cpufreq_rwsem); } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); @@ -539,9 +521,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, { int err = -EINVAL; - if (!cpufreq_driver) - goto out; - if (cpufreq_driver->setpolicy) { if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_PERFORMANCE; @@ -576,7 +555,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, mutex_unlock(&cpufreq_governor_mutex); } -out: return err; } @@ -625,9 +603,7 @@ static ssize_t store_##file_name \ int ret, temp; \ struct cpufreq_policy new_policy; \ \ - ret = cpufreq_get_policy(&new_policy, policy->cpu); \ - if (ret) \ - return -EINVAL; \ + memcpy(&new_policy, policy, sizeof(*policy)); \ \ ret = sscanf(buf, "%u", &new_policy.object); \ if (ret != 1) \ @@ -681,9 +657,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, char str_governor[16]; struct cpufreq_policy new_policy; - ret = cpufreq_get_policy(&new_policy, policy->cpu); - if (ret) - return ret; + memcpy(&new_policy, policy, sizeof(*policy)); ret = sscanf(buf, "%15s", str_governor); if (ret != 1) @@ -694,14 +668,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, return -EINVAL; ret = cpufreq_set_policy(policy, &new_policy); - - policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; - - if (ret) - return ret; - else - return count; + return ret ? 
ret : count; } /** @@ -851,9 +818,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) struct freq_attr *fattr = to_attr(attr); ssize_t ret; - if (!down_read_trylock(&cpufreq_rwsem)) - return -EINVAL; - down_read(&policy->rwsem); if (fattr->show) @@ -862,7 +826,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) ret = -EIO; up_read(&policy->rwsem); - up_read(&cpufreq_rwsem); return ret; } @@ -879,9 +842,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, if (!cpu_online(policy->cpu)) goto unlock; - if (!down_read_trylock(&cpufreq_rwsem)) - goto unlock; - down_write(&policy->rwsem); /* Updating inactive policies is invalid, so avoid doing that. */ @@ -897,8 +857,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, unlock_policy_rwsem: up_write(&policy->rwsem); - - up_read(&cpufreq_rwsem); unlock: put_online_cpus(); @@ -1027,8 +985,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) } } -static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, - struct device *dev) +static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) { struct freq_attr **drv_attr; int ret = 0; @@ -1060,11 +1017,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, return cpufreq_add_dev_symlink(policy); } -static void cpufreq_init_policy(struct cpufreq_policy *policy) +static int cpufreq_init_policy(struct cpufreq_policy *policy) { struct cpufreq_governor *gov = NULL; struct cpufreq_policy new_policy; - int ret = 0; memcpy(&new_policy, policy, sizeof(*policy)); @@ -1083,16 +1039,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); /* set default policy */ - ret = cpufreq_set_policy(policy, &new_policy); - if (ret) { - pr_debug("setting policy failed\n"); - if (cpufreq_driver->exit) - cpufreq_driver->exit(policy); - } + return cpufreq_set_policy(policy, &new_policy); } -static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, - unsigned int cpu, struct device *dev) +static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) { int ret = 0; @@ -1126,33 +1076,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, return 0; } -static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) -{ - struct cpufreq_policy *policy; - unsigned long flags; - - read_lock_irqsave(&cpufreq_driver_lock, flags); - policy = per_cpu(cpufreq_cpu_data, cpu); - read_unlock_irqrestore(&cpufreq_driver_lock, flags); - - if (likely(policy)) { - /* Policy should be inactive here */ - WARN_ON(!policy_is_inactive(policy)); - - down_write(&policy->rwsem); - policy->cpu = cpu; - policy->governor = NULL; - up_write(&policy->rwsem); - } - - return policy; -} - -static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) +static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) { + struct device *dev = get_cpu_device(cpu); struct cpufreq_policy *policy; int ret; + if (WARN_ON(!dev)) + return NULL; + policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) return NULL; @@ -1180,10 +1112,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); - policy->cpu = dev->id; + policy->cpu = cpu; /* Set this once on allocation */ - policy->kobj_cpu = dev->id; + policy->kobj_cpu = cpu; return policy; @@ -1245,59 +1177,34 @@ static void 
cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) kfree(policy); } -/** - * cpufreq_add_dev - add a CPU device - * - * Adds the cpufreq interface for a CPU device. - * - * The Oracle says: try running cpufreq registration/unregistration concurrently - * with with cpu hotplugging and all hell will break loose. Tried to clean this - * mess up, but more thorough testing is needed. - Mathieu - */ -static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) +static int cpufreq_online(unsigned int cpu) { - unsigned int j, cpu = dev->id; - int ret = -ENOMEM; struct cpufreq_policy *policy; + bool new_policy; unsigned long flags; - bool recover_policy = !sif; - - pr_debug("adding CPU %u\n", cpu); - - if (cpu_is_offline(cpu)) { - /* - * Only possible if we are here from the subsys_interface add - * callback. A hotplug notifier will follow and we will handle - * it as CPU online then. For now, just create the sysfs link, - * unless there is no policy or the link is already present. - */ - policy = per_cpu(cpufreq_cpu_data, cpu); - return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) - ? add_cpu_dev_symlink(policy, cpu) : 0; - } + unsigned int j; + int ret; - if (!down_read_trylock(&cpufreq_rwsem)) - return 0; + pr_debug("%s: bringing CPU%u online\n", __func__, cpu); /* Check if this CPU already has a policy to manage it */ policy = per_cpu(cpufreq_cpu_data, cpu); - if (policy && !policy_is_inactive(policy)) { + if (policy) { WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); - ret = cpufreq_add_policy_cpu(policy, cpu, dev); - up_read(&cpufreq_rwsem); - return ret; - } + if (!policy_is_inactive(policy)) + return cpufreq_add_policy_cpu(policy, cpu); - /* - * Restore the saved policy when doing light-weight init and fall back - * to the full init if that fails. - */ - policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; - if (!policy) { - recover_policy = false; - policy = cpufreq_policy_alloc(dev); + /* This is the only online CPU for the policy. Start over. */ + new_policy = false; + down_write(&policy->rwsem); + policy->cpu = cpu; + policy->governor = NULL; + up_write(&policy->rwsem); + } else { + new_policy = true; + policy = cpufreq_policy_alloc(cpu); if (!policy) - goto nomem_out; + return -ENOMEM; } cpumask_copy(policy->cpus, cpumask_of(cpu)); @@ -1308,17 +1215,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) ret = cpufreq_driver->init(policy); if (ret) { pr_debug("initialization failed\n"); - goto err_set_policy_cpu; + goto out_free_policy; } down_write(&policy->rwsem); - /* related cpus should atleast have policy->cpus */ - cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); - - /* Remember which CPUs have been present at the policy creation time. */ - if (!recover_policy) + if (new_policy) { + /* related_cpus should at least include policy->cpus. */ + cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); + /* Remember CPUs present at the policy creation time. */ cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); + } /* * affected cpus must always be the one, which are online. 
We aren't @@ -1326,7 +1233,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) */ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - if (!recover_policy) { + if (new_policy) { policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; @@ -1340,7 +1247,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) { pr_err("%s: ->get() failed\n", __func__); - goto err_get_freq; + goto out_exit_policy; } } @@ -1387,10 +1294,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); - if (!recover_policy) { - ret = cpufreq_add_dev_interface(policy, dev); + if (new_policy) { + ret = cpufreq_add_dev_interface(policy); if (ret) - goto err_out_unregister; + goto out_exit_policy; blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); @@ -1399,18 +1306,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) write_unlock_irqrestore(&cpufreq_driver_lock, flags); } - cpufreq_init_policy(policy); - - if (!recover_policy) { - policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; + ret = cpufreq_init_policy(policy); + if (ret) { + pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", + __func__, cpu, ret); + /* cpufreq_policy_free() will notify based on this */ + new_policy = false; + goto out_exit_policy; } + up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); - up_read(&cpufreq_rwsem); - /* Callback for handling stuff after policy is ready */ if (cpufreq_driver->ready) cpufreq_driver->ready(policy); @@ -1419,24 +1327,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) return 0; -err_out_unregister: -err_get_freq: +out_exit_policy: up_write(&policy->rwsem); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); -err_set_policy_cpu: - cpufreq_policy_free(policy, recover_policy); -nomem_out: - up_read(&cpufreq_rwsem); +out_free_policy: + cpufreq_policy_free(policy, !new_policy); + return ret; +} + +/** + * cpufreq_add_dev - the cpufreq interface for a CPU device. + * @dev: CPU device. + * @sif: Subsystem interface structure pointer (not used) + */ +static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) +{ + unsigned cpu = dev->id; + int ret; + + dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); + + if (cpu_online(cpu)) { + ret = cpufreq_online(cpu); + } else { + /* + * A hotplug notifier will follow and we will handle it as CPU + * online then. For now, just create the sysfs link, unless + * there is no policy or the link is already present. + */ + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) + ? 
add_cpu_dev_symlink(policy, cpu) : 0; + } return ret; } -static int __cpufreq_remove_dev_prepare(struct device *dev) +static void cpufreq_offline_prepare(unsigned int cpu) { - unsigned int cpu = dev->id; - int ret = 0; struct cpufreq_policy *policy; pr_debug("%s: unregistering CPU %u\n", __func__, cpu); @@ -1444,11 +1375,11 @@ static int __cpufreq_remove_dev_prepare(struct device *dev) policy = cpufreq_cpu_get_raw(cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); - return -EINVAL; + return; } if (has_target()) { - ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); if (ret) pr_err("%s: Failed to stop governor\n", __func__); } @@ -1469,7 +1400,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev) /* Start governor again for active policy */ if (!policy_is_inactive(policy)) { if (has_target()) { - ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); if (!ret) ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); @@ -1479,28 +1410,24 @@ static int __cpufreq_remove_dev_prepare(struct device *dev) } else if (cpufreq_driver->stop_cpu) { cpufreq_driver->stop_cpu(policy); } - - return ret; } -static int __cpufreq_remove_dev_finish(struct device *dev) +static void cpufreq_offline_finish(unsigned int cpu) { - unsigned int cpu = dev->id; - int ret; struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); - return -EINVAL; + return; } /* Only proceed for inactive policies */ if (!policy_is_inactive(policy)) - return 0; + return; /* If cpu is last user of policy, free policy */ if (has_target()) { - ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); if (ret) pr_err("%s: Failed to exit governor\n", __func__); } @@ -1510,10 +1437,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev) * since this is a core component, and is essential for the * subsequent light-weight ->init() to succeed. */ - if (cpufreq_driver->exit) + if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); - - return 0; + policy->freq_table = NULL; + } } /** @@ -1521,24 +1448,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev) * * Removes the cpufreq interface for a CPU device. 
*/ -static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) +static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); if (!policy) - return 0; + return; if (cpu_online(cpu)) { - __cpufreq_remove_dev_prepare(dev); - __cpufreq_remove_dev_finish(dev); + cpufreq_offline_prepare(cpu); + cpufreq_offline_finish(cpu); } cpumask_clear_cpu(cpu, policy->real_cpus); if (cpumask_empty(policy->real_cpus)) { cpufreq_policy_free(policy, true); - return 0; + return; } if (cpu != policy->kobj_cpu) { @@ -1557,8 +1484,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) policy->kobj_cpu = new_cpu; WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); } - - return 0; } static void handle_update(struct work_struct *work) @@ -1704,8 +1629,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy) int ret; if (!policy->suspend_freq) { - pr_err("%s: suspend_freq can't be zero\n", __func__); - return -EINVAL; + pr_debug("%s: suspend_freq not defined\n", __func__); + return 0; } pr_debug("%s: Setting suspend-freq: %u\n", __func__, @@ -2048,6 +1973,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, } out: + if (likely(retval != -EINVAL)) { + if (target_freq == policy->max) + cpu_nonscaling(policy->cpu); + else + cpu_scaling(policy->cpu); + } return retval; } EXPORT_SYMBOL_GPL(__cpufreq_driver_target); @@ -2109,8 +2040,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, if (!try_module_get(policy->governor->owner)) return -EINVAL; - pr_debug("__cpufreq_governor for CPU %u, event %u\n", - policy->cpu, event); + pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event); mutex_lock(&cpufreq_governor_lock); if ((policy->governor_enabled && event == CPUFREQ_GOV_START) @@ -2247,7 +2177,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); - if (new_policy->min > policy->max || new_policy->max < policy->min) + /* + * This check works well when we store new min/max freq attributes, + * because new_policy is a copy of policy with one field updated. 
+ */ + if (new_policy->min > new_policy->max) return -EINVAL; /* verify the cpu speed can be set within this limit */ @@ -2259,10 +2193,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, new_policy); - /* adjust if necessary - hardware incompatibility*/ - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_INCOMPATIBLE, new_policy); - /* * verify the cpu speed can be set within this limit, which might be * different to the first one @@ -2296,16 +2226,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, old_gov = policy->governor; /* end old governor */ if (old_gov) { - __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); + if (ret) { + /* This can happen due to race with other operations */ + pr_debug("%s: Failed to Stop Governor: %s (%d)\n", + __func__, old_gov->name, ret); + return ret; + } + up_write(&policy->rwsem); - __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); + ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); down_write(&policy->rwsem); + + if (ret) { + pr_err("%s: Failed to Exit Governor: %s (%d)\n", + __func__, old_gov->name, ret); + return ret; + } } /* start new governor */ policy->governor = new_policy->governor; - if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { - if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); + if (!ret) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) goto out; up_write(&policy->rwsem); @@ -2317,11 +2262,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, pr_debug("starting governor %s failed\n", policy->governor->name); if (old_gov) { policy->governor = old_gov; - __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); - __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) + policy->governor = NULL; + else + __cpufreq_governor(policy, CPUFREQ_GOV_START); } - return -EINVAL; + return ret; out: pr_debug("governor: change or update limits\n"); @@ -2350,8 +2297,6 @@ int cpufreq_update_policy(unsigned int cpu) memcpy(&new_policy, policy, sizeof(*policy)); new_policy.min = policy->user_policy.min; new_policy.max = policy->user_policy.max; - new_policy.policy = policy->user_policy.policy; - new_policy.governor = policy->user_policy.governor; /* * BIOS might change freq behind our back @@ -2387,27 +2332,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct device *dev; - dev = get_cpu_device(cpu); - if (dev) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - cpufreq_add_dev(dev, NULL); - break; + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + cpufreq_online(cpu); + break; - case CPU_DOWN_PREPARE: - __cpufreq_remove_dev_prepare(dev); - break; + case CPU_DOWN_PREPARE: + cpufreq_offline_prepare(cpu); + break; - case CPU_POST_DEAD: - __cpufreq_remove_dev_finish(dev); - break; + case CPU_POST_DEAD: + cpufreq_offline_finish(cpu); + break; - case CPU_DOWN_FAILED: - cpufreq_add_dev(dev, NULL); - break; - } + case CPU_DOWN_FAILED: + cpufreq_online(cpu); + break; } return NOTIFY_OK; } @@ -2477,6 +2418,49 @@ int cpufreq_boost_supported(void) } EXPORT_SYMBOL_GPL(cpufreq_boost_supported); +static int create_boost_sysfs_file(void) +{ + int ret; + + if (!cpufreq_boost_supported()) + return 0; + + /* + * Check if 
driver provides function to enable boost - + * if not, use cpufreq_boost_set_sw as default + */ + if (!cpufreq_driver->set_boost) + cpufreq_driver->set_boost = cpufreq_boost_set_sw; + + ret = cpufreq_sysfs_create_file(&boost.attr); + if (ret) + pr_err("%s: cannot register global BOOST sysfs file\n", + __func__); + + return ret; +} + +static void remove_boost_sysfs_file(void) +{ + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); +} + +int cpufreq_enable_boost_support(void) +{ + if (!cpufreq_driver) + return -EINVAL; + + if (cpufreq_boost_supported()) + return 0; + + cpufreq_driver->boost_supported = true; + + /* This will get removed on driver unregister */ + return create_boost_sysfs_file(); +} +EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); + int cpufreq_boost_enabled(void) { return cpufreq_driver->boost_enabled; @@ -2515,10 +2499,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) pr_debug("trying to register driver %s\n", driver_data->name); + /* Protect against concurrent CPU online/offline. */ + get_online_cpus(); + write_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { write_unlock_irqrestore(&cpufreq_driver_lock, flags); - return -EEXIST; + ret = -EEXIST; + goto out; } cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -2526,21 +2514,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; - if (cpufreq_boost_supported()) { - /* - * Check if driver provides function to enable boost - - * if not, use cpufreq_boost_set_sw as default - */ - if (!cpufreq_driver->set_boost) - cpufreq_driver->set_boost = cpufreq_boost_set_sw; - - ret = cpufreq_sysfs_create_file(&boost.attr); - if (ret) { - pr_err("%s: cannot register global BOOST sysfs file\n", - __func__); - goto err_null_driver; - } - } + ret = create_boost_sysfs_file(); + if (ret) + goto err_null_driver; ret = subsys_interface_register(&cpufreq_interface); if (ret) @@ -2557,17 +2533,19 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) register_hotcpu_notifier(&cpufreq_cpu_notifier); pr_debug("driver %s up and running\n", driver_data->name); - return 0; +out: + put_online_cpus(); + return ret; + err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: - if (cpufreq_boost_supported()) - cpufreq_sysfs_remove_file(&boost.attr); + remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - return ret; + goto out; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); @@ -2588,19 +2566,18 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) pr_debug("unregistering driver %s\n", driver->name); + /* Protect against concurrent cpu hotplug */ + get_online_cpus(); subsys_interface_unregister(&cpufreq_interface); - if (cpufreq_boost_supported()) - cpufreq_sysfs_remove_file(&boost.attr); - + remove_boost_sysfs_file(); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); - down_write(&cpufreq_rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - up_write(&cpufreq_rwsem); + put_online_cpus(); return 0; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c86a10c30..f13e26e47 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -15,8 +15,14 @@ #include 
"cpufreq_governor.h" /* Conservative governor macros */ +#ifdef CONFIG_SCHED_BFS +#define DEF_FREQUENCY_UP_THRESHOLD (63) +#define DEF_FREQUENCY_DOWN_THRESHOLD (26) +#else #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) +#endif + #define DEF_FREQUENCY_STEP (5) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) @@ -47,7 +53,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners, static void cs_check_cpu(int cpu, unsigned int load) { struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy; struct dbs_data *dbs_data = policy->governor_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; @@ -102,26 +108,15 @@ static void cs_check_cpu(int cpu, unsigned int load) } } -static void cs_dbs_timer(struct work_struct *work) +static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs, + struct dbs_data *dbs_data, bool modify_all) { - struct cs_cpu_dbs_info_s *dbs_info = container_of(work, - struct cs_cpu_dbs_info_s, cdbs.work.work); - unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; - struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info, - cpu); - struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; - int delay = delay_for_sampling_rate(cs_tuners->sampling_rate); - bool modify_all = true; - mutex_lock(&core_dbs_info->cdbs.timer_mutex); - if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate)) - modify_all = false; - else - dbs_check_cpu(dbs_data, cpu); + if (modify_all) + dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu); - gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); - mutex_unlock(&core_dbs_info->cdbs.timer_mutex); + return delay_for_sampling_rate(cs_tuners->sampling_rate); } static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -135,7 +130,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!dbs_info->enable) return 0; - policy = dbs_info->cdbs.cur_policy; + policy = dbs_info->cdbs.shared->policy; /* * we only care if our internally tracked freq moves outside the 'valid' diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 57a39f8a9..939197ffa 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) { - struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); + struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; - struct cpufreq_policy *policy; + struct cpufreq_policy *policy = cdbs->shared->policy; unsigned int sampling_rate; unsigned int max_load = 0; unsigned int ignore_nice; @@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) ignore_nice = cs_tuners->ignore_nice_load; } - policy = cdbs->cur_policy; - /* Get Absolute Load */ for_each_cpu(j, policy->cpus) { - struct cpu_dbs_common_info *j_cdbs; + struct cpu_dbs_info *j_cdbs; u64 cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; unsigned int load; @@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu); static inline void __gov_queue_work(int cpu, struct dbs_data 
*dbs_data, unsigned int delay) { - struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); + struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); - mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); + mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay); } void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, @@ -199,33 +197,63 @@ EXPORT_SYMBOL_GPL(gov_queue_work); static inline void gov_cancel_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy) { - struct cpu_dbs_common_info *cdbs; + struct cpu_dbs_info *cdbs; int i; for_each_cpu(i, policy->cpus) { cdbs = dbs_data->cdata->get_cpu_cdbs(i); - cancel_delayed_work_sync(&cdbs->work); + cancel_delayed_work_sync(&cdbs->dwork); } } /* Will return if we need to evaluate cpu load again or not */ -bool need_load_eval(struct cpu_dbs_common_info *cdbs, - unsigned int sampling_rate) +static bool need_load_eval(struct cpu_common_dbs_info *shared, + unsigned int sampling_rate) { - if (policy_is_shared(cdbs->cur_policy)) { + if (policy_is_shared(shared->policy)) { ktime_t time_now = ktime_get(); - s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp); + s64 delta_us = ktime_us_delta(time_now, shared->time_stamp); /* Do nothing if we recently have sampled */ if (delta_us < (s64)(sampling_rate / 2)) return false; else - cdbs->time_stamp = time_now; + shared->time_stamp = time_now; } return true; } -EXPORT_SYMBOL_GPL(need_load_eval); + +static void dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info, + dwork.work); + struct cpu_common_dbs_info *shared = cdbs->shared; + struct cpufreq_policy *policy = shared->policy; + struct dbs_data *dbs_data = policy->governor_data; + unsigned int sampling_rate, delay; + bool modify_all = true; + + mutex_lock(&shared->timer_mutex); + + if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + + sampling_rate = cs_tuners->sampling_rate; + } else { + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + + sampling_rate = od_tuners->sampling_rate; + } + + if (!need_load_eval(cdbs->shared, sampling_rate)) + modify_all = false; + + delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all); + gov_queue_work(dbs_data, policy, delay, modify_all); + + mutex_unlock(&shared->timer_mutex); +} static void set_sampling_rate(struct dbs_data *dbs_data, unsigned int sampling_rate) @@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data, } } +static int alloc_common_dbs_info(struct cpufreq_policy *policy, + struct common_dbs_data *cdata) +{ + struct cpu_common_dbs_info *shared; + int j; + + /* Allocate memory for the common information for policy->cpus */ + shared = kzalloc(sizeof(*shared), GFP_KERNEL); + if (!shared) + return -ENOMEM; + + /* Set shared for all CPUs, online+offline */ + for_each_cpu(j, policy->related_cpus) + cdata->get_cpu_cdbs(j)->shared = shared; + + return 0; +} + +static void free_common_dbs_info(struct cpufreq_policy *policy, + struct common_dbs_data *cdata) +{ + struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu); + struct cpu_common_dbs_info *shared = cdbs->shared; + int j; + + for_each_cpu(j, policy->cpus) + cdata->get_cpu_cdbs(j)->shared = NULL; + + kfree(shared); +} + static int cpufreq_governor_init(struct cpufreq_policy *policy, struct dbs_data *dbs_data, struct common_dbs_data *cdata) @@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy, unsigned int latency; int 
ret; + /* State should be equivalent to EXIT */ + if (policy->governor_data) + return -EBUSY; + if (dbs_data) { if (WARN_ON(have_governor_per_policy())) return -EINVAL; + + ret = alloc_common_dbs_info(policy, cdata); + if (ret) + return ret; + dbs_data->usage_count++; policy->governor_data = dbs_data; return 0; @@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy, if (!dbs_data) return -ENOMEM; + ret = alloc_common_dbs_info(policy, cdata); + if (ret) + goto free_dbs_data; + dbs_data->cdata = cdata; dbs_data->usage_count = 1; ret = cdata->init(dbs_data, !policy->governor->initialized); if (ret) - goto free_dbs_data; + goto free_common_dbs_info; /* policy latency is in ns. Convert it to us first */ latency = policy->cpuinfo.transition_latency / 1000; @@ -300,15 +372,22 @@ put_kobj: } cdata_exit: cdata->exit(dbs_data, !policy->governor->initialized); +free_common_dbs_info: + free_common_dbs_info(policy, cdata); free_dbs_data: kfree(dbs_data); return ret; } -static void cpufreq_governor_exit(struct cpufreq_policy *policy, - struct dbs_data *dbs_data) +static int cpufreq_governor_exit(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) { struct common_dbs_data *cdata = dbs_data->cdata; + struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu); + + /* State should be equivalent to INIT */ + if (!cdbs->shared || cdbs->shared->policy) + return -EBUSY; policy->governor_data = NULL; if (!--dbs_data->usage_count) { @@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy, cdata->exit(dbs_data, policy->governor->initialized == 1); kfree(dbs_data); } + + free_common_dbs_info(policy, cdata); + return 0; } static int cpufreq_governor_start(struct cpufreq_policy *policy, @@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, { struct common_dbs_data *cdata = dbs_data->cdata; unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu; - struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); + struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu); + struct cpu_common_dbs_info *shared = cdbs->shared; int io_busy = 0; if (!policy->cur) return -EINVAL; + /* State should be equivalent to INIT */ + if (!shared || shared->policy) + return -EBUSY; + if (cdata->governor == GOV_CONSERVATIVE) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; @@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, io_busy = od_tuners->io_is_busy; } + shared->policy = policy; + shared->time_stamp = ktime_get(); + mutex_init(&shared->timer_mutex); + for_each_cpu(j, policy->cpus) { - struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j); + struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j); unsigned int prev_load; - j_cdbs->cpu = j; - j_cdbs->cur_policy = policy; j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); @@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - mutex_init(&j_cdbs->timer_mutex); - INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer); + INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer); } if (cdata->governor == GOV_CONSERVATIVE) { @@ -386,20 +474,24 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy, od_ops->powersave_bias_init_cpu(cpu); } - /* Initiate timer time stamp */ - cpu_cdbs->time_stamp = ktime_get(); - gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), true); return 0; } 
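A minimal userspace sketch of the state discipline the -EBUSY checks above enforce (the types and names here are illustrative stand-ins, not the kernel's cpu_common_dbs_info):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the per-policy shared object and its owner. */
    struct shared { int started; };
    struct gov { struct shared *shared; };

    /* INIT: legal only from EXIT; allocates the per-policy shared object. */
    static int gov_init(struct gov *g)
    {
        if (g->shared)
            return -EBUSY;
        g->shared = calloc(1, sizeof(*g->shared));
        return g->shared ? 0 : -ENOMEM;
    }

    /* START: legal only after INIT; the kernel sets shared->policy here. */
    static int gov_start(struct gov *g)
    {
        if (!g->shared || g->shared->started)
            return -EBUSY;
        g->shared->started = 1;
        return 0;
    }

    /* STOP: legal only after START; the kernel clears shared->policy. */
    static int gov_stop(struct gov *g)
    {
        if (!g->shared || !g->shared->started)
            return -EBUSY;
        g->shared->started = 0;
        return 0;
    }

    /* EXIT: legal only after INIT or STOP; frees the shared object. */
    static int gov_exit(struct gov *g)
    {
        if (!g->shared || g->shared->started)
            return -EBUSY;
        free(g->shared);
        g->shared = NULL;
        return 0;
    }

    int main(void)
    {
        struct gov g = { 0 };

        printf("START before INIT: %d (expect -%d)\n", gov_start(&g), EBUSY);
        gov_init(&g);
        gov_start(&g);
        printf("double START: %d (expect -%d)\n", gov_start(&g), EBUSY);
        gov_stop(&g);
        gov_exit(&g);
        return 0;
    }

Out-of-order governor events now fail fast with -EBUSY instead of tripping a WARN_ON or dereferencing a stale cur_policy.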
-static void cpufreq_governor_stop(struct cpufreq_policy *policy, - struct dbs_data *dbs_data) +static int cpufreq_governor_stop(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) { struct common_dbs_data *cdata = dbs_data->cdata; unsigned int cpu = policy->cpu; - struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); + struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu); + struct cpu_common_dbs_info *shared = cdbs->shared; + + /* State should be equivalent to START */ + if (!shared || !shared->policy) + return -EBUSY; + + gov_cancel_work(dbs_data, policy); if (cdata->governor == GOV_CONSERVATIVE) { struct cs_cpu_dbs_info_s *cs_dbs_info = @@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy, cs_dbs_info->enable = 0; } - gov_cancel_work(dbs_data, policy); - - mutex_destroy(&cpu_cdbs->timer_mutex); - cpu_cdbs->cur_policy = NULL; + shared->policy = NULL; + mutex_destroy(&shared->timer_mutex); + return 0; } -static void cpufreq_governor_limits(struct cpufreq_policy *policy, - struct dbs_data *dbs_data) +static int cpufreq_governor_limits(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) { struct common_dbs_data *cdata = dbs_data->cdata; unsigned int cpu = policy->cpu; - struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); + struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu); - if (!cpu_cdbs->cur_policy) - return; + /* State should be equivalent to START */ + if (!cdbs->shared || !cdbs->shared->policy) + return -EBUSY; - mutex_lock(&cpu_cdbs->timer_mutex); - if (policy->max < cpu_cdbs->cur_policy->cur) - __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max, + mutex_lock(&cdbs->shared->timer_mutex); + if (policy->max < cdbs->shared->policy->cur) + __cpufreq_driver_target(cdbs->shared->policy, policy->max, CPUFREQ_RELATION_H); - else if (policy->min > cpu_cdbs->cur_policy->cur) - __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min, + else if (policy->min > cdbs->shared->policy->cur) + __cpufreq_driver_target(cdbs->shared->policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); - mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&cdbs->shared->timer_mutex); + + return 0; } int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event) { struct dbs_data *dbs_data; - int ret = 0; + int ret; /* Lock governor to block concurrent initialization of governor */ mutex_lock(&cdata->mutex); @@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, else dbs_data = cdata->gdbs_data; - if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) { + if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) { ret = -EINVAL; goto unlock; } @@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, ret = cpufreq_governor_init(policy, dbs_data, cdata); break; case CPUFREQ_GOV_POLICY_EXIT: - cpufreq_governor_exit(policy, dbs_data); + ret = cpufreq_governor_exit(policy, dbs_data); break; case CPUFREQ_GOV_START: ret = cpufreq_governor_start(policy, dbs_data); break; case CPUFREQ_GOV_STOP: - cpufreq_governor_stop(policy, dbs_data); + ret = cpufreq_governor_stop(policy, dbs_data); break; case CPUFREQ_GOV_LIMITS: - cpufreq_governor_limits(policy, dbs_data); + ret = cpufreq_governor_limits(policy, dbs_data); break; + default: + ret = -EINVAL; } unlock: diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 34736f5e8..50f171796 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ 
b/drivers/cpufreq/cpufreq_governor.h @@ -109,7 +109,7 @@ store_one(_gov, file_name) /* create helper routines */ #define define_get_cpu_dbs_routines(_dbs_info) \ -static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ +static struct cpu_dbs_info *get_cpu_cdbs(int cpu) \ { \ return &per_cpu(_dbs_info, cpu).cdbs; \ } \ @@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu) \ * cs_*: Conservative governor */ +/* Common to all CPUs of a policy */ +struct cpu_common_dbs_info { + struct cpufreq_policy *policy; + /* + * percpu mutex that serializes governor limit change with dbs_timer + * invocation. We do not want dbs_timer to run when user is changing + * the governor or limits. + */ + struct mutex timer_mutex; + ktime_t time_stamp; +}; + /* Per cpu structures */ -struct cpu_dbs_common_info { - int cpu; +struct cpu_dbs_info { u64 prev_cpu_idle; u64 prev_cpu_wall; u64 prev_cpu_nice; @@ -141,19 +152,12 @@ struct cpu_dbs_common_info { * wake-up from idle. */ unsigned int prev_load; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - /* - * percpu mutex that serializes governor limit change with gov_dbs_timer - * invocation. We do not want gov_dbs_timer to run when user is changing - * the governor or limits. - */ - struct mutex timer_mutex; - ktime_t time_stamp; + struct delayed_work dwork; + struct cpu_common_dbs_info *shared; }; struct od_cpu_dbs_info_s { - struct cpu_dbs_common_info cdbs; + struct cpu_dbs_info cdbs; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_jiffies; @@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s { }; struct cs_cpu_dbs_info_s { - struct cpu_dbs_common_info cdbs; + struct cpu_dbs_info cdbs; unsigned int down_skip; unsigned int requested_freq; unsigned int enable:1; @@ -204,9 +208,11 @@ struct common_dbs_data { */ struct dbs_data *gdbs_data; - struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); + struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu); void *(*get_cpu_dbs_info_s)(int cpu); - void (*gov_dbs_timer)(struct work_struct *work); + unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs, + struct dbs_data *dbs_data, + bool modify_all); void (*gov_check_cpu)(int cpu, unsigned int load); int (*init)(struct dbs_data *dbs_data, bool notify); void (*exit)(struct dbs_data *dbs_data, bool notify); @@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol \ extern struct mutex cpufreq_governor_lock; void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); -bool need_load_eval(struct cpu_dbs_common_info *cdbs, - unsigned int sampling_rate); int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event); void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 63f038e71..d8e04745d 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -19,7 +19,12 @@ #include "cpufreq_governor.h" /* On-demand governor macros */ +#ifdef CONFIG_SCHED_BFS +#define DEF_FREQUENCY_UP_THRESHOLD (63) +#else #define DEF_FREQUENCY_UP_THRESHOLD (80) +#endif + #ifdef CONFIG_PCK_INTERACTIVE #define DEF_SAMPLING_DOWN_FACTOR (10) #else @@ -159,7 +164,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) static void od_check_cpu(int cpu, unsigned int load) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + struct cpufreq_policy *policy = 
dbs_info->cdbs.shared->policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; @@ -195,46 +200,40 @@ static void od_check_cpu(int cpu, unsigned int load) } } -static void od_dbs_timer(struct work_struct *work) +static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs, + struct dbs_data *dbs_data, bool modify_all) { - struct od_cpu_dbs_info_s *dbs_info = - container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); - unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; - struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, + struct cpufreq_policy *policy = cdbs->shared->policy; + unsigned int cpu = policy->cpu; + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; - int delay = 0, sample_type = core_dbs_info->sample_type; - bool modify_all = true; + int delay = 0, sample_type = dbs_info->sample_type; - mutex_lock(&core_dbs_info->cdbs.timer_mutex); - if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) { - modify_all = false; + if (!modify_all) goto max_delay; - } /* Common NORMAL_SAMPLE setup */ - core_dbs_info->sample_type = OD_NORMAL_SAMPLE; + dbs_info->sample_type = OD_NORMAL_SAMPLE; if (sample_type == OD_SUB_SAMPLE) { - delay = core_dbs_info->freq_lo_jiffies; - __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, - core_dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + __cpufreq_driver_target(policy, dbs_info->freq_lo, + CPUFREQ_RELATION_H); } else { dbs_check_cpu(dbs_data, cpu); - if (core_dbs_info->freq_lo) { + if (dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ - core_dbs_info->sample_type = OD_SUB_SAMPLE; - delay = core_dbs_info->freq_hi_jiffies; + dbs_info->sample_type = OD_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; } } max_delay: if (!delay) delay = delay_for_sampling_rate(od_tuners->sampling_rate - * core_dbs_info->rate_mult); + * dbs_info->rate_mult); - gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all); - mutex_unlock(&core_dbs_info->cdbs.timer_mutex); + return delay; } /************************** sysfs interface ************************/ @@ -277,27 +276,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data, dbs_info = &per_cpu(od_cpu_dbs_info, cpu); cpufreq_cpu_put(policy); - mutex_lock(&dbs_info->cdbs.timer_mutex); + mutex_lock(&dbs_info->cdbs.shared->timer_mutex); - if (!delayed_work_pending(&dbs_info->cdbs.work)) { - mutex_unlock(&dbs_info->cdbs.timer_mutex); + if (!delayed_work_pending(&dbs_info->cdbs.dwork)) { + mutex_unlock(&dbs_info->cdbs.shared->timer_mutex); continue; } next_sampling = jiffies + usecs_to_jiffies(new_rate); - appointed_at = dbs_info->cdbs.work.timer.expires; + appointed_at = dbs_info->cdbs.dwork.timer.expires; if (time_before(next_sampling, appointed_at)) { - mutex_unlock(&dbs_info->cdbs.timer_mutex); - cancel_delayed_work_sync(&dbs_info->cdbs.work); - mutex_lock(&dbs_info->cdbs.timer_mutex); + mutex_unlock(&dbs_info->cdbs.shared->timer_mutex); + cancel_delayed_work_sync(&dbs_info->cdbs.dwork); + mutex_lock(&dbs_info->cdbs.shared->timer_mutex); - gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, - usecs_to_jiffies(new_rate), true); + gov_queue_work(dbs_data, policy, + usecs_to_jiffies(new_rate), true); } - mutex_unlock(&dbs_info->cdbs.timer_mutex); + mutex_unlock(&dbs_info->cdbs.shared->timer_mutex); } } @@ -560,13 +559,16 @@ static void 
od_set_powersave_bias(unsigned int powersave_bias) get_online_cpus(); for_each_online_cpu(cpu) { + struct cpu_common_dbs_info *shared; + if (cpumask_test_cpu(cpu, &done)) continue; - policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; - if (!policy) + shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared; + if (!shared) continue; + policy = shared->policy; cpumask_or(&done, &done, policy->cpus); if (policy->governor != &cpufreq_gov_ondemand) diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c index 773bcde89..0f5e6d5f6 100644 --- a/drivers/cpufreq/cpufreq_opp.c +++ b/drivers/cpufreq/cpufreq_opp.c @@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, } freq_table[i].driver_data = i; freq_table[i].frequency = rate / 1000; + + /* Is Boost/turbo opp ? */ + if (dev_pm_opp_is_turbo(opp)) + freq_table[i].flags = CPUFREQ_BOOST_FREQ; } freq_table[i].driver_data = i; diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index a0d2a423c..4085244c8 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -78,7 +78,7 @@ static int eps_acpi_init(void) static int eps_acpi_exit(struct cpufreq_policy *policy) { if (eps_acpi_cpu_perf) { - acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0); + acpi_processor_unregister_performance(0); free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map); kfree(eps_acpi_cpu_perf); eps_acpi_cpu_perf = NULL; diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c deleted file mode 100644 index fa3dd840a..000000000 --- a/drivers/cpufreq/exynos-cpufreq.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * EXYNOS - CPU frequency scaling support for EXYNOS series - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "exynos-cpufreq.h" - -static struct exynos_dvfs_info *exynos_info; -static struct thermal_cooling_device *cdev; -static struct regulator *arm_regulator; -static unsigned int locking_frequency; - -static int exynos_cpufreq_get_index(unsigned int freq) -{ - struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - struct cpufreq_frequency_table *pos; - - cpufreq_for_each_entry(pos, freq_table) - if (pos->frequency == freq) - break; - - if (pos->frequency == CPUFREQ_TABLE_END) - return -EINVAL; - - return pos - freq_table; -} - -static int exynos_cpufreq_scale(unsigned int target_freq) -{ - struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; - unsigned int *volt_table = exynos_info->volt_table; - struct cpufreq_policy *policy = cpufreq_cpu_get(0); - unsigned int arm_volt, safe_arm_volt = 0; - unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; - struct device *dev = exynos_info->dev; - unsigned int old_freq; - int index, old_index; - int ret = 0; - - old_freq = policy->cur; - - /* - * The policy max have been changed so that we cannot get proper - * old_index with cpufreq_frequency_table_target(). Thus, ignore - * policy and get the index from the raw frequency table. 
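Stepping back to the cpufreq_opp.c hunk above: tagging turbo OPPs with CPUFREQ_BOOST_FREQ lets later consumers discover boost support by scanning table flags alone, which is exactly what the policy_has_boost_freq() helper added to freq_table.c further below does. A self-contained sketch of that scan (the flag and sentinel values are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define CPUFREQ_BOOST_FREQ  (1u << 0)   /* illustrative flag value */
    #define CPUFREQ_TABLE_END   (~0u)       /* illustrative sentinel   */

    struct freq_entry {
        unsigned int flags;
        unsigned int frequency;    /* kHz */
    };

    /* Same idea as policy_has_boost_freq(): any flagged entry => boost. */
    static bool table_has_boost(const struct freq_entry *t)
    {
        for (; t->frequency != CPUFREQ_TABLE_END; t++)
            if (t->flags & CPUFREQ_BOOST_FREQ)
                return true;
        return false;
    }

    int main(void)
    {
        /* Turbo OPP marked the way dev_pm_opp_is_turbo() triggers above. */
        struct freq_entry table[] = {
            { CPUFREQ_BOOST_FREQ, 1500000 },
            { 0,                  1200000 },
            { 0,                   800000 },
            { 0, CPUFREQ_TABLE_END },
        };

        printf("boost available: %d\n", table_has_boost(table));
        return 0;
    }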
- */ - old_index = exynos_cpufreq_get_index(old_freq); - if (old_index < 0) { - ret = old_index; - goto out; - } - - index = exynos_cpufreq_get_index(target_freq); - if (index < 0) { - ret = index; - goto out; - } - - /* - * ARM clock source will be changed APLL to MPLL temporary - * To support this level, need to control regulator for - * required voltage level - */ - if (exynos_info->need_apll_change != NULL) { - if (exynos_info->need_apll_change(old_index, index) && - (freq_table[index].frequency < mpll_freq_khz) && - (freq_table[old_index].frequency < mpll_freq_khz)) - safe_arm_volt = volt_table[exynos_info->pll_safe_idx]; - } - arm_volt = volt_table[index]; - - /* When the new frequency is higher than current frequency */ - if ((target_freq > old_freq) && !safe_arm_volt) { - /* Firstly, voltage up to increase frequency */ - ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - if (ret) { - dev_err(dev, "failed to set cpu voltage to %d\n", - arm_volt); - return ret; - } - } - - if (safe_arm_volt) { - ret = regulator_set_voltage(arm_regulator, safe_arm_volt, - safe_arm_volt); - if (ret) { - dev_err(dev, "failed to set cpu voltage to %d\n", - safe_arm_volt); - return ret; - } - } - - exynos_info->set_freq(old_index, index); - - /* When the new frequency is lower than current frequency */ - if ((target_freq < old_freq) || - ((target_freq > old_freq) && safe_arm_volt)) { - /* down the voltage after frequency change */ - ret = regulator_set_voltage(arm_regulator, arm_volt, - arm_volt); - if (ret) { - dev_err(dev, "failed to set cpu voltage to %d\n", - arm_volt); - goto out; - } - } - -out: - cpufreq_cpu_put(policy); - - return ret; -} - -static int exynos_target(struct cpufreq_policy *policy, unsigned int index) -{ - return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency); -} - -static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - policy->clk = exynos_info->cpu_clk; - policy->suspend_freq = locking_frequency; - return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); -} - -static struct cpufreq_driver exynos_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = exynos_target, - .get = cpufreq_generic_get, - .init = exynos_cpufreq_cpu_init, - .name = "exynos_cpufreq", - .attr = cpufreq_generic_attr, -#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW - .boost_supported = true, -#endif -#ifdef CONFIG_PM - .suspend = cpufreq_generic_suspend, -#endif -}; - -static int exynos_cpufreq_probe(struct platform_device *pdev) -{ - struct device_node *cpu0; - int ret = -EINVAL; - - exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); - if (!exynos_info) - return -ENOMEM; - - exynos_info->dev = &pdev->dev; - - if (of_machine_is_compatible("samsung,exynos4212")) { - exynos_info->type = EXYNOS_SOC_4212; - ret = exynos4x12_cpufreq_init(exynos_info); - } else if (of_machine_is_compatible("samsung,exynos4412")) { - exynos_info->type = EXYNOS_SOC_4412; - ret = exynos4x12_cpufreq_init(exynos_info); - } else if (of_machine_is_compatible("samsung,exynos5250")) { - exynos_info->type = EXYNOS_SOC_5250; - ret = exynos5250_cpufreq_init(exynos_info); - } else { - pr_err("%s: Unknown SoC type\n", __func__); - ret = -ENODEV; - } - - if (ret) - goto err_vdd_arm; - - if (exynos_info->set_freq == NULL) { - dev_err(&pdev->dev, "No set_freq function (ERR)\n"); - ret = -EINVAL; - goto err_vdd_arm; - } - - arm_regulator = regulator_get(NULL, "vdd_arm"); - if (IS_ERR(arm_regulator)) { - 
dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); - ret = -EINVAL; - goto err_vdd_arm; - } - - /* Done here as we want to capture boot frequency */ - locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; - - ret = cpufreq_register_driver(&exynos_driver); - if (ret) - goto err_cpufreq_reg; - - cpu0 = of_get_cpu_node(0, NULL); - if (!cpu0) { - pr_err("failed to find cpu0 node\n"); - return 0; - } - - if (of_find_property(cpu0, "#cooling-cells", NULL)) { - cdev = of_cpufreq_cooling_register(cpu0, - cpu_present_mask); - if (IS_ERR(cdev)) - pr_err("running cpufreq without cooling device: %ld\n", - PTR_ERR(cdev)); - } - - return 0; - -err_cpufreq_reg: - dev_err(&pdev->dev, "failed to register cpufreq driver\n"); - regulator_put(arm_regulator); -err_vdd_arm: - kfree(exynos_info); - return ret; -} - -static struct platform_driver exynos_cpufreq_platdrv = { - .driver = { - .name = "exynos-cpufreq", - }, - .probe = exynos_cpufreq_probe, -}; -module_platform_driver(exynos_cpufreq_platdrv); diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h deleted file mode 100644 index a3855e4d9..000000000 --- a/drivers/cpufreq/exynos-cpufreq.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2010 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * EXYNOS - CPUFreq support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -enum cpufreq_level_index { - L0, L1, L2, L3, L4, - L5, L6, L7, L8, L9, - L10, L11, L12, L13, L14, - L15, L16, L17, L18, L19, - L20, -}; - -enum exynos_soc_type { - EXYNOS_SOC_4212, - EXYNOS_SOC_4412, - EXYNOS_SOC_5250, -}; - -#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ - { \ - .freq = (f) * 1000, \ - .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \ - (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \ - .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \ - .mps = ((m) << 16 | (p) << 8 | (s)), \ - } - -struct apll_freq { - unsigned int freq; - u32 clk_div_cpu0; - u32 clk_div_cpu1; - u32 mps; -}; - -struct exynos_dvfs_info { - enum exynos_soc_type type; - struct device *dev; - unsigned long mpll_freq_khz; - unsigned int pll_safe_idx; - struct clk *cpu_clk; - unsigned int *volt_table; - struct cpufreq_frequency_table *freq_table; - void (*set_freq)(unsigned int, unsigned int); - bool (*need_apll_change)(unsigned int, unsigned int); - void __iomem *cmu_regs; -}; - -#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ -extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *); -#else -static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) -{ - return -EOPNOTSUPP; -} -#endif -#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ -extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *); -#else -static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) -{ - return -EOPNOTSUPP; -} -#endif - -#define EXYNOS4_CLKSRC_CPU 0x14200 -#define EXYNOS4_CLKMUX_STATCPU 0x14400 - -#define EXYNOS4_CLKDIV_CPU 0x14500 -#define EXYNOS4_CLKDIV_CPU1 0x14504 -#define EXYNOS4_CLKDIV_STATCPU 0x14600 -#define EXYNOS4_CLKDIV_STATCPU1 0x14604 - -#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) -#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) - -#define EXYNOS5_APLL_LOCK 0x00000 -#define EXYNOS5_APLL_CON0 0x00100 -#define EXYNOS5_CLKMUX_STATCPU 0x00400 -#define EXYNOS5_CLKDIV_CPU0 0x00500 -#define EXYNOS5_CLKDIV_CPU1 0x00504 -#define 
EXYNOS5_CLKDIV_STATCPU0 0x00600 -#define EXYNOS5_CLKDIV_STATCPU1 0x00604 diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c deleted file mode 100644 index 9e78a850e..000000000 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * EXYNOS4X12 - CPU frequency scaling support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "exynos-cpufreq.h" - -static struct clk *cpu_clk; -static struct clk *moutcore; -static struct clk *mout_mpll; -static struct clk *mout_apll; -static struct exynos_dvfs_info *cpufreq; - -static unsigned int exynos4x12_volt_table[] = { - 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, - 1000000, 987500, 975000, 950000, 925000, 900000, 900000 -}; - -static struct cpufreq_frequency_table exynos4x12_freq_table[] = { - {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000}, - {0, L1, 1400 * 1000}, - {0, L2, 1300 * 1000}, - {0, L3, 1200 * 1000}, - {0, L4, 1100 * 1000}, - {0, L5, 1000 * 1000}, - {0, L6, 900 * 1000}, - {0, L7, 800 * 1000}, - {0, L8, 700 * 1000}, - {0, L9, 600 * 1000}, - {0, L10, 500 * 1000}, - {0, L11, 400 * 1000}, - {0, L12, 300 * 1000}, - {0, L13, 200 * 1000}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -static struct apll_freq *apll_freq_4x12; - -static struct apll_freq apll_freq_4212[] = { - /* - * values: - * freq - * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2 - * clock divider for COPY, HPM, RESERVED - * PLL M, P, S - */ - APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0), - APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0), - APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0), - APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0), - APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0), - APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0), - APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0), - APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0), - APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1), - APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1), - APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1), - APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1), - APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2), - APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2), -}; - -static struct apll_freq apll_freq_4412[] = { - /* - * values: - * freq - * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2 - * clock divider for COPY, HPM, CORES - * PLL M, P, S - */ - APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0), - APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0), - APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0), - APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0), - APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0), - APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0), - APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0), - APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0), - APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1), - APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1), - APLL_FREQ(500, 0, 2, 4, 0, 3, 
1, 1, 0, 3, 0, 2, 125, 3, 1), - APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1), - APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2), - APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2), -}; - -static void exynos4x12_set_clkdiv(unsigned int div_index) -{ - unsigned int tmp; - - /* Change Divider - CPU0 */ - - tmp = apll_freq_4x12[div_index].clk_div_cpu0; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); - - while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU) - & 0x11111111) - cpu_relax(); - - /* Change Divider - CPU1 */ - tmp = apll_freq_4x12[div_index].clk_div_cpu1; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); - - do { - cpu_relax(); - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); - } while (tmp != 0x0); -} - -static void exynos4x12_set_apll(unsigned int index) -{ - unsigned int tmp, freq = apll_freq_4x12[index].freq; - - /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ - clk_set_parent(moutcore, mout_mpll); - - do { - cpu_relax(); - tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) - >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); - tmp &= 0x7; - } while (tmp != 0x2); - - clk_set_rate(mout_apll, freq * 1000); - - /* MUX_CORE_SEL = APLL */ - clk_set_parent(moutcore, mout_apll); - - do { - cpu_relax(); - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); - tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; - } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); -} - -static void exynos4x12_set_frequency(unsigned int old_index, - unsigned int new_index) -{ - if (old_index > new_index) { - exynos4x12_set_clkdiv(new_index); - exynos4x12_set_apll(new_index); - } else if (old_index < new_index) { - exynos4x12_set_apll(new_index); - exynos4x12_set_clkdiv(new_index); - } -} - -int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) -{ - struct device_node *np; - unsigned long rate; - - /* - * HACK: This is a temporary workaround to get access to clock - * controller registers directly and remove static mappings and - * dependencies on platform headers. It is necessary to enable - * Exynos multi-platform support and will be removed together with - * this whole driver as soon as Exynos gets migrated to use - * cpufreq-dt driver. 
- */ - np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); - if (!np) { - pr_err("%s: failed to find clock controller DT node\n", - __func__); - return -ENODEV; - } - - info->cmu_regs = of_iomap(np, 0); - if (!info->cmu_regs) { - pr_err("%s: failed to map CMU registers\n", __func__); - return -EFAULT; - } - - cpu_clk = clk_get(NULL, "armclk"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - moutcore = clk_get(NULL, "moutcore"); - if (IS_ERR(moutcore)) - goto err_moutcore; - - mout_mpll = clk_get(NULL, "mout_mpll"); - if (IS_ERR(mout_mpll)) - goto err_mout_mpll; - - rate = clk_get_rate(mout_mpll) / 1000; - - mout_apll = clk_get(NULL, "mout_apll"); - if (IS_ERR(mout_apll)) - goto err_mout_apll; - - if (info->type == EXYNOS_SOC_4212) - apll_freq_4x12 = apll_freq_4212; - else - apll_freq_4x12 = apll_freq_4412; - - info->mpll_freq_khz = rate; - /* 800Mhz */ - info->pll_safe_idx = L7; - info->cpu_clk = cpu_clk; - info->volt_table = exynos4x12_volt_table; - info->freq_table = exynos4x12_freq_table; - info->set_freq = exynos4x12_set_frequency; - - cpufreq = info; - - return 0; - -err_mout_apll: - clk_put(mout_mpll); -err_mout_mpll: - clk_put(moutcore); -err_moutcore: - clk_put(cpu_clk); - - pr_debug("%s: failed initialization\n", __func__); - return -EINVAL; -} diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c deleted file mode 100644 index 3eafdc7ba..000000000 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * EXYNOS5250 - CPU frequency scaling support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation.
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "exynos-cpufreq.h" - -static struct clk *cpu_clk; -static struct clk *moutcore; -static struct clk *mout_mpll; -static struct clk *mout_apll; -static struct exynos_dvfs_info *cpufreq; - -static unsigned int exynos5250_volt_table[] = { - 1300000, 1250000, 1225000, 1200000, 1150000, - 1125000, 1100000, 1075000, 1050000, 1025000, - 1012500, 1000000, 975000, 950000, 937500, - 925000 -}; - -static struct cpufreq_frequency_table exynos5250_freq_table[] = { - {0, L0, 1700 * 1000}, - {0, L1, 1600 * 1000}, - {0, L2, 1500 * 1000}, - {0, L3, 1400 * 1000}, - {0, L4, 1300 * 1000}, - {0, L5, 1200 * 1000}, - {0, L6, 1100 * 1000}, - {0, L7, 1000 * 1000}, - {0, L8, 900 * 1000}, - {0, L9, 800 * 1000}, - {0, L10, 700 * 1000}, - {0, L11, 600 * 1000}, - {0, L12, 500 * 1000}, - {0, L13, 400 * 1000}, - {0, L14, 300 * 1000}, - {0, L15, 200 * 1000}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -static struct apll_freq apll_freq_5250[] = { - /* - * values: - * freq - * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 - * clock divider for COPY, HPM, RESERVED - * PLL M, P, S - */ - APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0), - APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0), - APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0), - APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0), - APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0), - APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0), - APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0), - APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0), - APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0), - APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0), - APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1), - APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1), - APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1), - APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1), - APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2), - APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2), -}; - -static void set_clkdiv(unsigned int div_index) -{ - unsigned int tmp; - - /* Change Divider - CPU0 */ - - tmp = apll_freq_5250[div_index].clk_div_cpu0; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0); - - while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0) - & 0x11111111) - cpu_relax(); - - /* Change Divider - CPU1 */ - tmp = apll_freq_5250[div_index].clk_div_cpu1; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1); - - while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11) - cpu_relax(); -} - -static void set_apll(unsigned int index) -{ - unsigned int tmp; - unsigned int freq = apll_freq_5250[index].freq; - - /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ - clk_set_parent(moutcore, mout_mpll); - - do { - cpu_relax(); - tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU) - >> 16); - tmp &= 0x7; - } while (tmp != 0x2); - - clk_set_rate(mout_apll, freq * 1000); - - /* MUX_CORE_SEL = APLL */ - clk_set_parent(moutcore, mout_apll); - - do { - cpu_relax(); - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU); - tmp &= (0x7 << 16); - } while (tmp != (0x1 << 16)); -} - -static void exynos5250_set_frequency(unsigned int old_index, - unsigned int new_index) -{ - if (old_index > new_index) { - set_clkdiv(new_index); - set_apll(new_index); - } else if (old_index 
< new_index) { - set_apll(new_index); - set_clkdiv(new_index); - } -} - -int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) -{ - struct device_node *np; - unsigned long rate; - - /* - * HACK: This is a temporary workaround to get access to clock - * controller registers directly and remove static mappings and - * dependencies on platform headers. It is necessary to enable - * Exynos multi-platform support and will be removed together with - * this whole driver as soon as Exynos gets migrated to use - * cpufreq-dt driver. - */ - np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); - if (!np) { - pr_err("%s: failed to find clock controller DT node\n", - __func__); - return -ENODEV; - } - - info->cmu_regs = of_iomap(np, 0); - if (!info->cmu_regs) { - pr_err("%s: failed to map CMU registers\n", __func__); - return -EFAULT; - } - - cpu_clk = clk_get(NULL, "armclk"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - moutcore = clk_get(NULL, "mout_cpu"); - if (IS_ERR(moutcore)) - goto err_moutcore; - - mout_mpll = clk_get(NULL, "mout_mpll"); - if (IS_ERR(mout_mpll)) - goto err_mout_mpll; - - rate = clk_get_rate(mout_mpll) / 1000; - - mout_apll = clk_get(NULL, "mout_apll"); - if (IS_ERR(mout_apll)) - goto err_mout_apll; - - info->mpll_freq_khz = rate; - /* 800Mhz */ - info->pll_safe_idx = L9; - info->cpu_clk = cpu_clk; - info->volt_table = exynos5250_volt_table; - info->freq_table = exynos5250_freq_table; - info->set_freq = exynos5250_set_frequency; - - cpufreq = info; - - return 0; - -err_mout_apll: - clk_put(mout_mpll); -err_mout_mpll: - clk_put(moutcore); -err_moutcore: - clk_put(cpu_clk); - - pr_err("%s: failed initialization\n", __func__); - return -EINVAL; -} diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index dfbbf981e..a8f1daffc 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -18,6 +18,21 @@ * FREQUENCY TABLE HELPERS * *********************************************************************/ +bool policy_has_boost_freq(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pos, *table = policy->freq_table; + + if (!table) + return false; + + cpufreq_for_each_valid_entry(pos, table) + if (pos->flags & CPUFREQ_BOOST_FREQ) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(policy_has_boost_freq); + int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index c30aaa6a5..0202429f1 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -29,7 +29,6 @@ MODULE_LICENSE("GPL"); struct cpufreq_acpi_io { struct acpi_processor_performance acpi_data; - struct cpufreq_frequency_table *freq_table; unsigned int resume; }; @@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init ( unsigned int cpu = policy->cpu; struct cpufreq_acpi_io *data; unsigned int result = 0; + struct cpufreq_frequency_table *freq_table; pr_debug("acpi_cpufreq_cpu_init\n"); @@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init ( } /* alloc freq_table */ - data->freq_table = kzalloc(sizeof(*data->freq_table) * + freq_table = kzalloc(sizeof(*freq_table) * (data->acpi_data.state_count + 1), GFP_KERNEL); - if (!data->freq_table) { + if (!freq_table) { result = -ENOMEM; goto err_unreg; } @@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init ( for (i = 0; i <= data->acpi_data.state_count; i++) { if (i < data->acpi_data.state_count) { - data->freq_table[i].frequency = + freq_table[i].frequency = 
data->acpi_data.states[i].core_frequency * 1000; } else { - data->freq_table[i].frequency = CPUFREQ_TABLE_END; + freq_table[i].frequency = CPUFREQ_TABLE_END; } } - result = cpufreq_table_validate_and_show(policy, data->freq_table); + result = cpufreq_table_validate_and_show(policy, freq_table); if (result) { goto err_freqfree; } @@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init ( return (result); err_freqfree: - kfree(data->freq_table); + kfree(freq_table); err_unreg: - acpi_processor_unregister_performance(&data->acpi_data, cpu); + acpi_processor_unregister_performance(cpu); err_free: kfree(data); acpi_io_data[cpu] = NULL; @@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit ( if (data) { acpi_io_data[policy->cpu] = NULL; - acpi_processor_unregister_performance(&data->acpi_data, - policy->cpu); + acpi_processor_unregister_performance(policy->cpu); + kfree(policy->freq_table); kfree(data); } diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index 129e266f7..2faa4216b 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy, /* get current setting */ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); - if (machine_is_integrator()) { + if (machine_is_integrator()) vco.s = (cm_osc >> 8) & 7; - } else if (machine_is_cintegrator()) { + else if (machine_is_cintegrator()) vco.s = 1; - } vco.v = cm_osc & 255; vco.r = 22; freqs.old = icst_hz(&cclk_params, vco) / 1000; @@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu) /* detect memory etc. */ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET); - if (machine_is_integrator()) { + if (machine_is_integrator()) vco.s = (cm_osc >> 8) & 7; - } else { + else vco.s = 1; - } vco.v = cm_osc & 255; vco.r = 22; @@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev) struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) + if (!res) return -ENODEV; cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); @@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = { module_platform_driver_probe(integrator_cpufreq_driver, integrator_cpufreq_probe); -MODULE_AUTHOR ("Russell M. King"); -MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs"); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Russell M. 
King"); +MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index fcb929ec5..fec23702b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -260,24 +260,31 @@ static inline void update_turbo_state(void) cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); } -#define PCT_TO_HWP(x) (x * 255 / 100) static void intel_pstate_hwp_set(void) { - int min, max, cpu; - u64 value, freq; + int min, hw_min, max, hw_max, cpu, range, adj_range; + u64 value, cap; + + rdmsrl(MSR_HWP_CAPABILITIES, cap); + hw_min = HWP_LOWEST_PERF(cap); + hw_max = HWP_HIGHEST_PERF(cap); + range = hw_max - hw_min; get_online_cpus(); for_each_online_cpu(cpu) { rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); - min = PCT_TO_HWP(limits.min_perf_pct); + adj_range = limits.min_perf_pct * range / 100; + min = hw_min + adj_range; value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); - max = PCT_TO_HWP(limits.max_perf_pct); + adj_range = limits.max_perf_pct * range / 100; + max = hw_min + adj_range; if (limits.no_turbo) { - rdmsrl( MSR_HWP_CAPABILITIES, freq); - max = HWP_GUARANTEED_PERF(freq); + hw_max = HWP_GUARANTEED_PERF(cap); + if (hw_max < max) + max = hw_max; } value &= ~HWP_MAX_PERF(~0L); @@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); + limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct); + limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); if (hwp_active) @@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, limits.min_sysfs_pct = clamp_t(int, input, 0 , 100); limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); + limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct); + limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); if (hwp_active) @@ -484,12 +495,11 @@ static void __init intel_pstate_sysfs_expose_params(void) } /************************** sysfs end ************************/ -static void intel_pstate_hwp_enable(void) +static void intel_pstate_hwp_enable(struct cpudata *cpudata) { - hwp_active++; pr_info("intel_pstate: HWP enabled\n"); - wrmsrl( MSR_PM_ENABLE, 0x1); + wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); } static int byt_get_min_pstate(void) @@ -522,7 +532,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) int32_t vid_fp; u32 vid; - val = pstate << 8; + val = (u64)pstate << 8; if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; @@ -533,8 +543,13 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); vid = ceiling_fp(vid_fp); - if (pstate > cpudata->pstate.max_pstate) - vid = cpudata->vid.turbo; + if (pstate < cpudata->pstate.max_pstate) + cpu_scaling(cpudata->cpu); + else { + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; + cpu_nonscaling(cpudata->cpu); + } val |= vid; @@ -611,7 +626,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) { u64 val; - val = pstate << 8; + val = (u64)pstate << 8; if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; @@ -766,7 +781,12 @@ 
static inline void intel_pstate_sample(struct cpudata *cpu) local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - tsc = native_read_tsc(); + if (cpu->prev_mperf == mperf) { + local_irq_restore(flags); + return; + } + + tsc = rdtsc(); local_irq_restore(flags); cpu->last_sample_time = cpu->sample.time; @@ -909,6 +929,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x4c, byt_params), ICPU(0x4e, core_params), ICPU(0x4f, core_params), + ICPU(0x5e, core_params), ICPU(0x56, core_params), ICPU(0x57, knl_params), {} @@ -933,6 +954,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; cpu->cpu = cpunum; + + if (hwp_active) + intel_pstate_hwp_enable(cpu); + intel_pstate_get_cpu_pstates(cpu); init_timer_deferrable(&cpu->timer); @@ -985,12 +1010,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100); - limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); - limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); - limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); + + /* Normalize user input to [min_policy_pct, max_policy_pct] */ + limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); + limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct); limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); + limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct); + + /* Make sure min_perf_pct <= max_perf_pct */ + limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct); + + limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); if (hwp_active) @@ -1170,6 +1202,10 @@ static struct hw_vendor_info vendor_info[] = { {1, "ORACLE", "X4270M3 ", PPC}, {1, "ORACLE", "X4270M2 ", PPC}, {1, "ORACLE", "X4170M2 ", PPC}, + {1, "ORACLE", "X4170 M3", PPC}, + {1, "ORACLE", "X4275 M3", PPC}, + {1, "ORACLE", "X6-2 ", PPC}, + {1, "ORACLE", "Sudbury ", PPC}, {0, "", ""}, }; @@ -1246,7 +1282,7 @@ static int __init intel_pstate_init(void) return -ENOMEM; if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) - intel_pstate_hwp_enable(); + hwp_active++; if (!hwp_active && hwp_only) goto out; diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c new file mode 100644 index 000000000..49caed293 --- /dev/null +++ b/drivers/cpufreq/mt8173-cpufreq.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2015 Linaro Ltd. + * Author: Pi-Cheng Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
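Looking back at the intel_pstate_hwp_set() rework a few hunks above: instead of the old PCT_TO_HWP() macro, which scaled percentages onto a fixed 0-255 range, the percent limits are now mapped onto the [hw_min, hw_max] window read from MSR_HWP_CAPABILITIES. A small sketch of the arithmetic (the capability values below are made up):

    #include <stdio.h>

    /* Map a percent limit onto the hardware performance range, as the
     * patched intel_pstate_hwp_set() does: hw_min + pct * range / 100. */
    static int pct_to_perf(int pct, int hw_min, int hw_max)
    {
        return hw_min + pct * (hw_max - hw_min) / 100;
    }

    int main(void)
    {
        int hw_min = 8, hw_max = 36;   /* made-up HWP capability values */

        /* Old PCT_TO_HWP(50) gave 127 regardless of the CPU; the
         * range-relative mapping always lands inside [hw_min, hw_max]. */
        printf("min 25%% -> %d\n", pct_to_perf(25, hw_min, hw_max)); /* 15 */
        printf("max 50%% -> %d\n", pct_to_perf(50, hw_min, hw_max)); /* 22 */
        return 0;
    }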
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MIN_VOLT_SHIFT (100000) +#define MAX_VOLT_SHIFT (200000) +#define MAX_VOLT_LIMIT (1150000) +#define VOLT_TOL (10000) + +/* + * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS + * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in + * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two + * voltage inputs need to be controlled under a hardware limitation: + * 100mV < Vsram - Vproc < 200mV + * + * When scaling the clock frequency of a CPU clock domain, the clock source + * needs to be switched to another stable PLL clock temporarily until + * the original PLL becomes stable at target frequency. + */ +struct mtk_cpu_dvfs_info { + struct device *cpu_dev; + struct regulator *proc_reg; + struct regulator *sram_reg; + struct clk *cpu_clk; + struct clk *inter_clk; + struct thermal_cooling_device *cdev; + int intermediate_voltage; + bool need_voltage_tracking; +}; + +static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, + int new_vproc) +{ + struct regulator *proc_reg = info->proc_reg; + struct regulator *sram_reg = info->sram_reg; + int old_vproc, old_vsram, new_vsram, vsram, vproc, ret; + + old_vproc = regulator_get_voltage(proc_reg); + old_vsram = regulator_get_voltage(sram_reg); + /* Vsram should not exceed the maximum allowed voltage of SoC. */ + new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT); + + if (old_vproc < new_vproc) { + /* + * When scaling up voltages, Vsram and Vproc scale up step + * by step. At each step, set Vsram to (Vproc + 200mV) first, + * then set Vproc to (Vsram - 100mV). + * Keep doing it until Vsram and Vproc hit target voltages. + */ + do { + old_vsram = regulator_get_voltage(sram_reg); + old_vproc = regulator_get_voltage(proc_reg); + + vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT); + + if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { + vsram = MAX_VOLT_LIMIT; + + /* + * If the target Vsram hits the maximum voltage, + * try to set the exact voltage value first. + */ + ret = regulator_set_voltage(sram_reg, vsram, + vsram); + if (ret) + ret = regulator_set_voltage(sram_reg, + vsram - VOLT_TOL, + vsram); + + vproc = new_vproc; + } else { + ret = regulator_set_voltage(sram_reg, vsram, + vsram + VOLT_TOL); + + vproc = vsram - MIN_VOLT_SHIFT; + } + if (ret) + return ret; + + ret = regulator_set_voltage(proc_reg, vproc, + vproc + VOLT_TOL); + if (ret) { + regulator_set_voltage(sram_reg, old_vsram, + old_vsram); + return ret; + } + } while (vproc < new_vproc || vsram < new_vsram); + } else if (old_vproc > new_vproc) { + /* + * When scaling down voltages, Vsram and Vproc scale down step + * by step. At each step, set Vproc to (Vsram - 200mV) first, + * then set Vproc to (Vproc + 100mV). + * Keep doing it until Vsram and Vproc hit target voltages. + */ + do { + old_vproc = regulator_get_voltage(proc_reg); + old_vsram = regulator_get_voltage(sram_reg); + + vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT); + ret = regulator_set_voltage(proc_reg, vproc, + vproc + VOLT_TOL); + if (ret) + return ret; + + if (vproc == new_vproc) + vsram = new_vsram; + else + vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT); + + if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { + vsram = MAX_VOLT_LIMIT; + + /* + * If the target Vsram hits the maximum voltage, + * try to set the exact voltage value first. 
+ */ + ret = regulator_set_voltage(sram_reg, vsram, + vsram); + if (ret) + ret = regulator_set_voltage(sram_reg, + vsram - VOLT_TOL, + vsram); + } else { + ret = regulator_set_voltage(sram_reg, vsram, + vsram + VOLT_TOL); + } + + if (ret) { + regulator_set_voltage(proc_reg, old_vproc, + old_vproc); + return ret; + } + } while (vproc > new_vproc + VOLT_TOL || + vsram > new_vsram + VOLT_TOL); + } + + return 0; +} + +static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc) +{ + if (info->need_voltage_tracking) + return mtk_cpufreq_voltage_tracking(info, vproc); + else + return regulator_set_voltage(info->proc_reg, vproc, + vproc + VOLT_TOL); +} + +static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct cpufreq_frequency_table *freq_table = policy->freq_table; + struct clk *cpu_clk = policy->clk; + struct clk *armpll = clk_get_parent(cpu_clk); + struct mtk_cpu_dvfs_info *info = policy->driver_data; + struct device *cpu_dev = info->cpu_dev; + struct dev_pm_opp *opp; + long freq_hz, old_freq_hz; + int vproc, old_vproc, inter_vproc, target_vproc, ret; + + inter_vproc = info->intermediate_voltage; + + old_freq_hz = clk_get_rate(cpu_clk); + old_vproc = regulator_get_voltage(info->proc_reg); + + freq_hz = freq_table[index].frequency * 1000; + + rcu_read_lock(); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); + if (IS_ERR(opp)) { + rcu_read_unlock(); + pr_err("cpu%d: failed to find OPP for %ld\n", + policy->cpu, freq_hz); + return PTR_ERR(opp); + } + vproc = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + + /* + * If the new voltage or the intermediate voltage is higher than the + * current voltage, scale up voltage first. + */ + target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc; + if (old_vproc < target_vproc) { + ret = mtk_cpufreq_set_voltage(info, target_vproc); + if (ret) { + pr_err("cpu%d: failed to scale up voltage!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, old_vproc); + return ret; + } + } + + /* Reparent the CPU clock to intermediate clock. */ + ret = clk_set_parent(cpu_clk, info->inter_clk); + if (ret) { + pr_err("cpu%d: failed to re-parent cpu clock!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, old_vproc); + WARN_ON(1); + return ret; + } + + /* Set the original PLL to target rate. */ + ret = clk_set_rate(armpll, freq_hz); + if (ret) { + pr_err("cpu%d: failed to scale cpu clock rate!\n", + policy->cpu); + clk_set_parent(cpu_clk, armpll); + mtk_cpufreq_set_voltage(info, old_vproc); + return ret; + } + + /* Set parent of CPU clock back to the original PLL. */ + ret = clk_set_parent(cpu_clk, armpll); + if (ret) { + pr_err("cpu%d: failed to re-parent cpu clock!\n", + policy->cpu); + mtk_cpufreq_set_voltage(info, inter_vproc); + WARN_ON(1); + return ret; + } + + /* + * If the new voltage is lower than the intermediate voltage or the + * original voltage, scale down to the new voltage. 
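The tracking loop above moves the two rails one bounded step at a time. A toy simulation of the scale-up direction (user-space C, made-up microvolt values, with the MAX_VOLT_LIMIT cap and the real regulator calls left out) shows Vsram - Vproc staying inside the 100mV..200mV window at every iteration:

#include <stdio.h>

/* Simplified model of mtk_cpufreq_voltage_tracking() scaling up:
 * raise Vsram first (at most 200mV above Vproc), then let Vproc
 * follow (100mV below Vsram), until both reach their targets. */
#define MIN_VOLT_SHIFT	100000
#define MAX_VOLT_SHIFT	200000

static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int vproc = 800000, vsram = 900000;	/* current voltages, uV */
	int new_vproc = 1000000;		/* hypothetical target */
	int new_vsram = new_vproc + MIN_VOLT_SHIFT;

	while (vproc < new_vproc || vsram < new_vsram) {
		vsram = min_i(new_vsram, vproc + MAX_VOLT_SHIFT);
		vproc = min_i(new_vproc, vsram - MIN_VOLT_SHIFT);
		printf("Vproc=%d Vsram=%d diff=%d\n", vproc, vsram, vsram - vproc);
	}
	return 0;
}

Two iterations suffice here (900/1000 mV, then 1000/1100 mV); the driver additionally re-reads the regulators each pass because a set_voltage call may land anywhere inside the min/max pair it is given.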
+ */ + if (vproc < inter_vproc || vproc < old_vproc) { + ret = mtk_cpufreq_set_voltage(info, vproc); + if (ret) { + pr_err("cpu%d: failed to scale down voltage!\n", + policy->cpu); + clk_set_parent(cpu_clk, info->inter_clk); + clk_set_rate(armpll, old_freq_hz); + clk_set_parent(cpu_clk, armpll); + return ret; + } + } + + return 0; +} + +static void mtk_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info = policy->driver_data; + struct device_node *np = of_node_get(info->cpu_dev->of_node); + + if (WARN_ON(!np)) + return; + + if (of_find_property(np, "#cooling-cells", NULL)) { + info->cdev = of_cpufreq_cooling_register(np, + policy->related_cpus); + + if (IS_ERR(info->cdev)) { + dev_err(info->cpu_dev, + "running cpufreq without cooling device: %ld\n", + PTR_ERR(info->cdev)); + + info->cdev = NULL; + } + } + + of_node_put(np); +} + +static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) +{ + struct device *cpu_dev; + struct regulator *proc_reg = ERR_PTR(-ENODEV); + struct regulator *sram_reg = ERR_PTR(-ENODEV); + struct clk *cpu_clk = ERR_PTR(-ENODEV); + struct clk *inter_clk = ERR_PTR(-ENODEV); + struct dev_pm_opp *opp; + unsigned long rate; + int ret; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", cpu); + return -ENODEV; + } + + cpu_clk = clk_get(cpu_dev, "cpu"); + if (IS_ERR(cpu_clk)) { + if (PTR_ERR(cpu_clk) == -EPROBE_DEFER) + pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu); + else + pr_err("failed to get cpu clk for cpu%d\n", cpu); + + ret = PTR_ERR(cpu_clk); + return ret; + } + + inter_clk = clk_get(cpu_dev, "intermediate"); + if (IS_ERR(inter_clk)) { + if (PTR_ERR(inter_clk) == -EPROBE_DEFER) + pr_warn("intermediate clk for cpu%d not ready, retry.\n", + cpu); + else + pr_err("failed to get intermediate clk for cpu%d\n", + cpu); + + ret = PTR_ERR(inter_clk); + goto out_free_resources; + } + + proc_reg = regulator_get_exclusive(cpu_dev, "proc"); + if (IS_ERR(proc_reg)) { + if (PTR_ERR(proc_reg) == -EPROBE_DEFER) + pr_warn("proc regulator for cpu%d not ready, retry.\n", + cpu); + else + pr_err("failed to get proc regulator for cpu%d\n", + cpu); + + ret = PTR_ERR(proc_reg); + goto out_free_resources; + } + + /* Both presence and absence of sram regulator are valid cases. */ + sram_reg = regulator_get_exclusive(cpu_dev, "sram"); + + ret = of_init_opp_table(cpu_dev); + if (ret) { + pr_warn("no OPP table for cpu%d\n", cpu); + goto out_free_resources; + } + + /* Search a safe voltage for intermediate frequency. */ + rate = clk_get_rate(inter_clk); + rcu_read_lock(); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); + if (IS_ERR(opp)) { + rcu_read_unlock(); + pr_err("failed to get intermediate opp for cpu%d\n", cpu); + ret = PTR_ERR(opp); + goto out_free_opp_table; + } + info->intermediate_voltage = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + + info->cpu_dev = cpu_dev; + info->proc_reg = proc_reg; + info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg; + info->cpu_clk = cpu_clk; + info->inter_clk = inter_clk; + + /* + * If SRAM regulator is present, software "voltage tracking" is needed + * for this CPU power domain. 
+ */ + info->need_voltage_tracking = !IS_ERR(sram_reg); + + return 0; + +out_free_opp_table: + of_free_opp_table(cpu_dev); + +out_free_resources: + if (!IS_ERR(proc_reg)) + regulator_put(proc_reg); + if (!IS_ERR(sram_reg)) + regulator_put(sram_reg); + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); + if (!IS_ERR(inter_clk)) + clk_put(inter_clk); + + return ret; +} + +static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info) +{ + if (!IS_ERR(info->proc_reg)) + regulator_put(info->proc_reg); + if (!IS_ERR(info->sram_reg)) + regulator_put(info->sram_reg); + if (!IS_ERR(info->cpu_clk)) + clk_put(info->cpu_clk); + if (!IS_ERR(info->inter_clk)) + clk_put(info->inter_clk); + + of_free_opp_table(info->cpu_dev); +} + +static int mtk_cpufreq_init(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info; + struct cpufreq_frequency_table *freq_table; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = mtk_cpu_dvfs_info_init(info, policy->cpu); + if (ret) { + pr_err("%s failed to initialize dvfs info for cpu%d\n", + __func__, policy->cpu); + goto out_free_dvfs_info; + } + + ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table); + if (ret) { + pr_err("failed to init cpufreq table for cpu%d: %d\n", + policy->cpu, ret); + goto out_release_dvfs_info; + } + + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + pr_err("%s: invalid frequency table: %d\n", __func__, ret); + goto out_free_cpufreq_table; + } + + /* CPUs in the same cluster share a clock and power domain. */ + cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling); + policy->driver_data = info; + policy->clk = info->cpu_clk; + + return 0; + +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table); + +out_release_dvfs_info: + mtk_cpu_dvfs_info_release(info); + +out_free_dvfs_info: + kfree(info); + + return ret; +} + +static int mtk_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct mtk_cpu_dvfs_info *info = policy->driver_data; + + cpufreq_cooling_unregister(info->cdev); + dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); + mtk_cpu_dvfs_info_release(info); + kfree(info); + + return 0; +} + +static struct cpufreq_driver mt8173_cpufreq_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = mtk_cpufreq_set_target, + .get = cpufreq_generic_get, + .init = mtk_cpufreq_init, + .exit = mtk_cpufreq_exit, + .ready = mtk_cpufreq_ready, + .name = "mtk-cpufreq", + .attr = cpufreq_generic_attr, +}; + +static int mt8173_cpufreq_probe(struct platform_device *pdev) +{ + int ret; + + ret = cpufreq_register_driver(&mt8173_cpufreq_driver); + if (ret) + pr_err("failed to register mtk cpufreq driver\n"); + + return ret; +} + +static struct platform_driver mt8173_cpufreq_platdrv = { + .driver = { + .name = "mt8173-cpufreq", + }, + .probe = mt8173_cpufreq_probe, +}; + +static int mt8173_cpufreq_driver_init(void) +{ + struct platform_device *pdev; + int err; + + if (!of_machine_is_compatible("mediatek,mt8173")) + return -ENODEV; + + err = platform_driver_register(&mt8173_cpufreq_platdrv); + if (err) + return err; + + /* + * Since there's no place to hold device registration code and no + * device tree based way to match cpufreq driver yet, both the driver + * and the device registration codes are put here to handle defer + * probing. 
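The comment above describes a pattern worth isolating: because nothing in the device tree matches a cpufreq platform driver directly, the init code registers both the driver and a device for it, and any resource that is not ready yet simply defers the probe. A minimal sketch of the same arrangement (kernel-style C; the "demo-cpufreq" name and empty probe body are placeholders, not part of the patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* clock/regulator lookups that may return -EPROBE_DEFER go here;
	 * the driver core will then retry this probe later */
	return 0;
}

static struct platform_driver demo_driver = {
	.driver	= { .name = "demo-cpufreq" },
	.probe	= demo_probe,
};

static int __init demo_init(void)
{
	int err = platform_driver_register(&demo_driver);

	if (err)
		return err;

	/* no DT match exists, so instantiate the device by hand */
	return PTR_ERR_OR_ZERO(platform_device_register_simple("demo-cpufreq",
							       -1, NULL, 0));
}
device_initcall(demo_init);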
+ */ + pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); + if (IS_ERR(pdev)) { + pr_err("failed to register mtk-cpufreq platform device\n"); + return PTR_ERR(pdev); + } + + return 0; +} +device_initcall(mt8173_cpufreq_driver_init); diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 37c574248..c1ae19997 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -421,7 +421,7 @@ static int powernow_acpi_init(void) return 0; err2: - acpi_processor_unregister_performance(acpi_processor_perf, 0); + acpi_processor_unregister_performance(0); err1: free_cpumask_var(acpi_processor_perf->shared_cpu_map); err05: @@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { - acpi_processor_unregister_performance(acpi_processor_perf, 0); + acpi_processor_unregister_performance(0); free_cpumask_var(acpi_processor_perf->shared_cpu_map); kfree(acpi_processor_perf); } diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 5c035d04d..0b5bf135b 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -795,7 +795,7 @@ err_out_mem: kfree(powernow_table); err_out: - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + acpi_processor_unregister_performance(data->cpu); /* data->acpi_data.state_count informs us at ->exit() * whether ACPI was used */ @@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { if (data->acpi_data.state_count) - acpi_processor_unregister_performance(&data->acpi_data, - data->cpu); + acpi_processor_unregister_performance(data->cpu); free_cpumask_var(data->acpi_data.shared_cpu_map); } diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index ebef0d827..64994e106 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -27,20 +27,31 @@ #include #include #include +#include #include #include #include #include /* Required for cpu_sibling_mask() in UP configs */ +#include #define POWERNV_MAX_PSTATES 256 #define PMSR_PSAFE_ENABLE (1UL << 30) #define PMSR_SPR_EM_DISABLE (1UL << 31) #define PMSR_MAX(x) ((x >> 32) & 0xFF) -#define PMSR_LP(x) ((x >> 48) & 0xFF) static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; -static bool rebooting, throttled; +static bool rebooting, throttled, occ_reset; + +static struct chip { + unsigned int id; + bool throttled; + cpumask_t mask; + struct work_struct throttle; + bool restore; +} *chips; + +static int nr_chips; /* * Note: The set of pstates consists of contiguous integers, the @@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void) return powernv_pstate_info.max - powernv_pstate_info.nominal; } -static void powernv_cpufreq_throttle_check(unsigned int cpu) +static void powernv_cpufreq_throttle_check(void *data) { + unsigned int cpu = smp_processor_id(); unsigned long pmsr; - int pmsr_pmax, pmsr_lp; + int pmsr_pmax, i; pmsr = get_pmspr(SPRN_PMSR); + for (i = 0; i < nr_chips; i++) + if (chips[i].id == cpu_to_chip_id(cpu)) + break; + /* Check for Pmax Capping */ pmsr_pmax = (s8)PMSR_MAX(pmsr); if (pmsr_pmax != powernv_pstate_info.max) { - throttled = true; - pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax); - pr_info("Max allowed Pstate is capped\n"); + if (chips[i].throttled) + goto next; + chips[i].throttled = true; + 
pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu, + chips[i].id, pmsr_pmax); + } else if (chips[i].throttled) { + chips[i].throttled = false; + pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu, + chips[i].id, pmsr_pmax); } - /* - * Check for Psafe by reading LocalPstate - * or check if Psafe_mode_active is set in PMSR. - */ - pmsr_lp = (s8)PMSR_LP(pmsr); - if ((pmsr_lp < powernv_pstate_info.min) || - (pmsr & PMSR_PSAFE_ENABLE)) { + /* Check if Psafe_mode_active is set in PMSR. */ +next: + if (pmsr & PMSR_PSAFE_ENABLE) { throttled = true; pr_info("Pstate set to safe frequency\n"); } @@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, return 0; if (!throttled) - powernv_cpufreq_throttle_check(smp_processor_id()); + powernv_cpufreq_throttle_check(NULL); freq_data.pstate_id = powernv_freqs[new_index].driver_data; @@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { .notifier_call = powernv_cpufreq_reboot_notifier, }; +void powernv_cpufreq_work_fn(struct work_struct *work) +{ + struct chip *chip = container_of(work, struct chip, throttle); + unsigned int cpu; + cpumask_var_t mask; + + smp_call_function_any(&chip->mask, + powernv_cpufreq_throttle_check, NULL, 0); + + if (!chip->restore) + return; + + chip->restore = false; + cpumask_copy(mask, &chip->mask); + for_each_cpu_and(cpu, mask, cpu_online_mask) { + int index, tcpu; + struct cpufreq_policy policy; + + cpufreq_get_policy(&policy, cpu); + cpufreq_frequency_table_target(&policy, policy.freq_table, + policy.cur, + CPUFREQ_RELATION_C, &index); + powernv_cpufreq_target_index(&policy, index); + for_each_cpu(tcpu, policy.cpus) + cpumask_clear_cpu(tcpu, mask); + } +} + +static char throttle_reason[][30] = { + "No throttling", + "Power Cap", + "Processor Over Temperature", + "Power Supply Failure", + "Over Current", + "OCC Reset" + }; + +static int powernv_cpufreq_occ_msg(struct notifier_block *nb, + unsigned long msg_type, void *_msg) +{ + struct opal_msg *msg = _msg; + struct opal_occ_msg omsg; + int i; + + if (msg_type != OPAL_MSG_OCC) + return 0; + + omsg.type = be64_to_cpu(msg->params[0]); + + switch (omsg.type) { + case OCC_RESET: + occ_reset = true; + pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n"); + /* + * powernv_cpufreq_throttle_check() is called in + * target() callback which can detect the throttle state + * for governors like ondemand. + * But static governors will not call target() often thus + * report throttling here. 
+ */ + if (!throttled) { + throttled = true; + pr_crit("CPU frequency is throttled for duration\n"); + } + + break; + case OCC_LOAD: + pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n"); + break; + case OCC_THROTTLE: + omsg.chip = be64_to_cpu(msg->params[1]); + omsg.throttle_status = be64_to_cpu(msg->params[2]); + + if (occ_reset) { + occ_reset = false; + throttled = false; + pr_info("OCC Active, CPU frequency is no longer throttled\n"); + + for (i = 0; i < nr_chips; i++) { + chips[i].restore = true; + schedule_work(&chips[i].throttle); + } + + return 0; + } + + if (omsg.throttle_status && + omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) + pr_info("OCC: Chip %u Pmax reduced due to %s\n", + (unsigned int)omsg.chip, + throttle_reason[omsg.throttle_status]); + else if (!omsg.throttle_status) + pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip, + throttle_reason[omsg.throttle_status]); + else + return 0; + + for (i = 0; i < nr_chips; i++) + if (chips[i].id == omsg.chip) { + if (!omsg.throttle_status) + chips[i].restore = true; + schedule_work(&chips[i].throttle); + } + } + return 0; +} + +static struct notifier_block powernv_cpufreq_opal_nb = { + .notifier_call = powernv_cpufreq_occ_msg, + .next = NULL, + .priority = 0, +}; + static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) { struct powernv_smp_call_data freq_data; @@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = { .attr = powernv_cpu_freq_attr, }; +static int init_chip_info(void) +{ + unsigned int chip[256]; + unsigned int cpu, i; + unsigned int prev_chip_id = UINT_MAX; + + for_each_possible_cpu(cpu) { + unsigned int id = cpu_to_chip_id(cpu); + + if (prev_chip_id != id) { + prev_chip_id = id; + chip[nr_chips++] = id; + } + } + + chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL); + if (!chips) + return -ENOMEM; + + for (i = 0; i < nr_chips; i++) { + chips[i].id = chip[i]; + chips[i].throttled = false; + cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i])); + INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn); + chips[i].restore = false; + } + + return 0; +} + static int __init powernv_cpufreq_init(void) { int rc = 0; @@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void) return rc; } + /* Populate chip info */ + rc = init_chip_info(); + if (rc) + return rc; + register_reboot_notifier(&powernv_cpufreq_reboot_nb); + opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); return cpufreq_register_driver(&powernv_cpufreq_driver); } module_init(powernv_cpufreq_init); @@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init); static void __exit powernv_cpufreq_exit(void) { unregister_reboot_notifier(&powernv_cpufreq_reboot_nb); + opal_message_notifier_unregister(OPAL_MSG_OCC, + &powernv_cpufreq_opal_nb); cpufreq_unregister_driver(&powernv_cpufreq_driver); } module_exit(powernv_cpufreq_exit); diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c index d29e8da39..7969f7690 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb, struct cpufreq_frequency_table *cbe_freqs; u8 node; - /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE - * and CPUFREQ_NOTIFY policy events?) + /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY + * policy events?) 
*/ if (event == CPUFREQ_START) return 0; diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c index ffa3389e5..992ce6f9a 100644 --- a/drivers/cpufreq/sfi-cpufreq.c +++ b/drivers/cpufreq/sfi-cpufreq.c @@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table) pentry = (struct sfi_freq_table_entry *)sb->pentry; totallen = num_freq_table_entries * sizeof(*pentry); - sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL); + sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL); if (!sfi_cpufreq_array) return -ENOMEM; - memcpy(sfi_cpufreq_array, pentry, totallen); - return 0; } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 4ab7a2156..15d3214aa 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, unsigned int prev_speed; unsigned int ret = 0; unsigned long flags; - struct timeval tv1, tv2; + ktime_t tv1, tv2; if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; @@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, /* start latency measurement */ if (transition_latency) - do_gettimeofday(&tv1); + tv1 = ktime_get(); /* switch to high state */ set_state(SPEEDSTEP_HIGH); /* end latency measurement */ if (transition_latency) - do_gettimeofday(&tv2); + tv2 = ktime_get(); *high_speed = speedstep_get_frequency(processor); if (!*high_speed) { @@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, set_state(SPEEDSTEP_LOW); if (transition_latency) { - *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + - tv2.tv_usec - tv1.tv_usec; + *transition_latency = ktime_to_us(ktime_sub(tv2, tv1)); pr_debug("transition latency is %u uSec\n", *transition_latency); /* convert uSec to nSec and add 20% for safety reasons */ diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c deleted file mode 100644 index 8084c7f7e..000000000 --- a/drivers/cpufreq/tegra-cpufreq.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Colin Cross - * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct cpufreq_frequency_table freq_table[] = { - { .frequency = 216000 }, - { .frequency = 312000 }, - { .frequency = 456000 }, - { .frequency = 608000 }, - { .frequency = 760000 }, - { .frequency = 816000 }, - { .frequency = 912000 }, - { .frequency = 1000000 }, - { .frequency = CPUFREQ_TABLE_END }, -}; - -#define NUM_CPUS 2 - -static struct clk *cpu_clk; -static struct clk *pll_x_clk; -static struct clk *pll_p_clk; -static struct clk *emc_clk; -static bool pll_x_prepared; - -static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy, - unsigned int index) -{ - unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; - - /* - * Don't switch to intermediate freq if: - * - we are already at it, i.e. policy->cur == ifreq - * - index corresponds to ifreq - */ - if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq)) - return 0; - - return ifreq; -} - -static int tegra_target_intermediate(struct cpufreq_policy *policy, - unsigned int index) -{ - int ret; - - /* - * Take an extra reference to the main pll so it doesn't turn - * off when we move the cpu off of it as enabling it again while we - * switch to it from tegra_target() would take additional time. - * - * When target-freq is equal to intermediate freq we don't need to - * switch to an intermediate freq and so this routine isn't called. - * Also, we wouldn't be using pll_x anymore and must not take extra - * reference to it, as it can be disabled now to save some power. - */ - clk_prepare_enable(pll_x_clk); - - ret = clk_set_parent(cpu_clk, pll_p_clk); - if (ret) - clk_disable_unprepare(pll_x_clk); - else - pll_x_prepared = true; - - return ret; -} - -static int tegra_target(struct cpufreq_policy *policy, unsigned int index) -{ - unsigned long rate = freq_table[index].frequency; - unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; - int ret = 0; - - /* - * Vote on memory bus frequency based on cpu frequency - * This sets the minimum frequency, display or avp may request higher - */ - if (rate >= 816000) - clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ - else if (rate >= 456000) - clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ - else - clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ - - /* - * target freq == pll_p, don't need to take extra reference to pll_x_clk - * as it isn't used anymore. - */ - if (rate == ifreq) - return clk_set_parent(cpu_clk, pll_p_clk); - - ret = clk_set_rate(pll_x_clk, rate * 1000); - /* Restore to earlier frequency on error, i.e. pll_x */ - if (ret) - pr_err("Failed to change pll_x to %lu\n", rate); - - ret = clk_set_parent(cpu_clk, pll_x_clk); - /* This shouldn't fail while changing or restoring */ - WARN_ON(ret); - - /* - * Drop count to pll_x clock only if we switched to intermediate freq - * earlier while transitioning to a target frequency. - */ - if (pll_x_prepared) { - clk_disable_unprepare(pll_x_clk); - pll_x_prepared = false; - } - - return ret; -} - -static int tegra_cpu_init(struct cpufreq_policy *policy) -{ - int ret; - - if (policy->cpu >= NUM_CPUS) - return -EINVAL; - - clk_prepare_enable(emc_clk); - clk_prepare_enable(cpu_clk); - - /* FIXME: what's the actual transition time? 
*/ - ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); - if (ret) { - clk_disable_unprepare(cpu_clk); - clk_disable_unprepare(emc_clk); - return ret; - } - - policy->clk = cpu_clk; - policy->suspend_freq = freq_table[0].frequency; - return 0; -} - -static int tegra_cpu_exit(struct cpufreq_policy *policy) -{ - clk_disable_unprepare(cpu_clk); - clk_disable_unprepare(emc_clk); - return 0; -} - -static struct cpufreq_driver tegra_cpufreq_driver = { - .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = cpufreq_generic_frequency_table_verify, - .get_intermediate = tegra_get_intermediate, - .target_intermediate = tegra_target_intermediate, - .target_index = tegra_target, - .get = cpufreq_generic_get, - .init = tegra_cpu_init, - .exit = tegra_cpu_exit, - .name = "tegra", - .attr = cpufreq_generic_attr, -#ifdef CONFIG_PM - .suspend = cpufreq_generic_suspend, -#endif -}; - -static int __init tegra_cpufreq_init(void) -{ - cpu_clk = clk_get_sys(NULL, "cclk"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - pll_x_clk = clk_get_sys(NULL, "pll_x"); - if (IS_ERR(pll_x_clk)) - return PTR_ERR(pll_x_clk); - - pll_p_clk = clk_get_sys(NULL, "pll_p"); - if (IS_ERR(pll_p_clk)) - return PTR_ERR(pll_p_clk); - - emc_clk = clk_get_sys("cpu", "emc"); - if (IS_ERR(emc_clk)) { - clk_put(cpu_clk); - return PTR_ERR(emc_clk); - } - - return cpufreq_register_driver(&tegra_cpufreq_driver); -} - -static void __exit tegra_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&tegra_cpufreq_driver); - clk_put(emc_clk); - clk_put(cpu_clk); -} - - -MODULE_AUTHOR("Colin Cross "); -MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); -MODULE_LICENSE("GPL"); -module_init(tegra_cpufreq_init); -module_exit(tegra_cpufreq_exit); diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c new file mode 100644 index 000000000..20bcceb58 --- /dev/null +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -0,0 +1,214 @@ +/* + * Tegra 124 cpufreq driver + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct tegra124_cpufreq_priv { + struct regulator *vdd_cpu_reg; + struct clk *cpu_clk; + struct clk *pllp_clk; + struct clk *pllx_clk; + struct clk *dfll_clk; + struct platform_device *cpufreq_dt_pdev; +}; + +static int tegra124_cpu_switch_to_dfll(struct tegra124_cpufreq_priv *priv) +{ + struct clk *orig_parent; + int ret; + + ret = clk_set_rate(priv->dfll_clk, clk_get_rate(priv->cpu_clk)); + if (ret) + return ret; + + orig_parent = clk_get_parent(priv->cpu_clk); + clk_set_parent(priv->cpu_clk, priv->pllp_clk); + + ret = clk_prepare_enable(priv->dfll_clk); + if (ret) + goto out; + + clk_set_parent(priv->cpu_clk, priv->dfll_clk); + + return 0; + +out: + clk_set_parent(priv->cpu_clk, orig_parent); + + return ret; +} + +static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv) +{ + clk_set_parent(priv->cpu_clk, priv->pllp_clk); + clk_disable_unprepare(priv->dfll_clk); + regulator_sync_voltage(priv->vdd_cpu_reg); + clk_set_parent(priv->cpu_clk, priv->pllx_clk); +} + +static struct cpufreq_dt_platform_data cpufreq_dt_pd = { + .independent_clocks = false, +}; + +static int tegra124_cpufreq_probe(struct platform_device *pdev) +{ + struct tegra124_cpufreq_priv *priv; + struct device_node *np; + struct device *cpu_dev; + struct platform_device_info cpufreq_dt_devinfo = {}; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return -ENODEV; + + np = of_cpu_device_node_get(0); + if (!np) + return -ENODEV; + + priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu"); + if (IS_ERR(priv->vdd_cpu_reg)) { + ret = PTR_ERR(priv->vdd_cpu_reg); + goto out_put_np; + } + + priv->cpu_clk = of_clk_get_by_name(np, "cpu_g"); + if (IS_ERR(priv->cpu_clk)) { + ret = PTR_ERR(priv->cpu_clk); + goto out_put_vdd_cpu_reg; + } + + priv->dfll_clk = of_clk_get_by_name(np, "dfll"); + if (IS_ERR(priv->dfll_clk)) { + ret = PTR_ERR(priv->dfll_clk); + goto out_put_cpu_clk; + } + + priv->pllx_clk = of_clk_get_by_name(np, "pll_x"); + if (IS_ERR(priv->pllx_clk)) { + ret = PTR_ERR(priv->pllx_clk); + goto out_put_dfll_clk; + } + + priv->pllp_clk = of_clk_get_by_name(np, "pll_p"); + if (IS_ERR(priv->pllp_clk)) { + ret = PTR_ERR(priv->pllp_clk); + goto out_put_pllx_clk; + } + + ret = tegra124_cpu_switch_to_dfll(priv); + if (ret) + goto out_put_pllp_clk; + + cpufreq_dt_devinfo.name = "cpufreq-dt"; + cpufreq_dt_devinfo.parent = &pdev->dev; + cpufreq_dt_devinfo.data = &cpufreq_dt_pd; + cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd); + + priv->cpufreq_dt_pdev = + platform_device_register_full(&cpufreq_dt_devinfo); + if (IS_ERR(priv->cpufreq_dt_pdev)) { + ret = PTR_ERR(priv->cpufreq_dt_pdev); + goto out_switch_to_pllx; + } + + platform_set_drvdata(pdev, priv); + + return 0; + +out_switch_to_pllx: + tegra124_cpu_switch_to_pllx(priv); +out_put_pllp_clk: + clk_put(priv->pllp_clk); +out_put_pllx_clk: + clk_put(priv->pllx_clk); +out_put_dfll_clk: + clk_put(priv->dfll_clk); +out_put_cpu_clk: + clk_put(priv->cpu_clk); +out_put_vdd_cpu_reg: + regulator_put(priv->vdd_cpu_reg); +out_put_np: + of_node_put(np); + + return ret; +} + +static int tegra124_cpufreq_remove(struct platform_device *pdev) +{ + struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev); + + platform_device_unregister(priv->cpufreq_dt_pdev); + tegra124_cpu_switch_to_pllx(priv); + 
+ clk_put(priv->pllp_clk); + clk_put(priv->pllx_clk); + clk_put(priv->dfll_clk); + clk_put(priv->cpu_clk); + regulator_put(priv->vdd_cpu_reg); + + return 0; +} + +static struct platform_driver tegra124_cpufreq_platdrv = { + .driver.name = "cpufreq-tegra124", + .probe = tegra124_cpufreq_probe, + .remove = tegra124_cpufreq_remove, +}; + +static int __init tegra_cpufreq_init(void) +{ + int ret; + struct platform_device *pdev; + + if (!of_machine_is_compatible("nvidia,tegra124")) + return -ENODEV; + + /* + * Platform driver+device required for handling EPROBE_DEFER with + * the regulator and the DFLL clock + */ + ret = platform_driver_register(&tegra124_cpufreq_platdrv); + if (ret) + return ret; + + pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0); + if (IS_ERR(pdev)) { + platform_driver_unregister(&tegra124_cpufreq_platdrv); + return PTR_ERR(pdev); + } + + return 0; +} +module_init(tegra_cpufreq_init); + +MODULE_AUTHOR("Tuomas Tynkkynen "); +MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c new file mode 100644 index 000000000..8084c7f7e --- /dev/null +++ b/drivers/cpufreq/tegra20-cpufreq.c @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Colin Cross + * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct cpufreq_frequency_table freq_table[] = { + { .frequency = 216000 }, + { .frequency = 312000 }, + { .frequency = 456000 }, + { .frequency = 608000 }, + { .frequency = 760000 }, + { .frequency = 816000 }, + { .frequency = 912000 }, + { .frequency = 1000000 }, + { .frequency = CPUFREQ_TABLE_END }, +}; + +#define NUM_CPUS 2 + +static struct clk *cpu_clk; +static struct clk *pll_x_clk; +static struct clk *pll_p_clk; +static struct clk *emc_clk; +static bool pll_x_prepared; + +static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; + + /* + * Don't switch to intermediate freq if: + * - we are already at it, i.e. policy->cur == ifreq + * - index corresponds to ifreq + */ + if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq)) + return 0; + + return ifreq; +} + +static int tegra_target_intermediate(struct cpufreq_policy *policy, + unsigned int index) +{ + int ret; + + /* + * Take an extra reference to the main pll so it doesn't turn + * off when we move the cpu off of it as enabling it again while we + * switch to it from tegra_target() would take additional time. + * + * When target-freq is equal to intermediate freq we don't need to + * switch to an intermediate freq and so this routine isn't called. + * Also, we wouldn't be using pll_x anymore and must not take extra + * reference to it, as it can be disabled now to save some power. 
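The intermediate-frequency handling that tegra_target_intermediate() and tegra_target() split between them condenses to one sequence: hold the main PLL, park the CPU on the backup PLL, retune the main PLL off-line, move the CPU back, then drop the extra hold. A condensed sketch (kernel-style C, error unwinding trimmed; cpu, pll_x and pll_p stand for the clocks fetched at init):

#include <linux/clk.h>

static int retune_via_backup(struct clk *cpu, struct clk *pll_x,
			     struct clk *pll_p, unsigned long rate)
{
	int ret;

	/* keep pll_x alive while the CPU is not clocked from it */
	ret = clk_prepare_enable(pll_x);
	if (ret)
		return ret;

	ret = clk_set_parent(cpu, pll_p);	/* park on the backup PLL */
	if (ret)
		goto out;

	ret = clk_set_rate(pll_x, rate);	/* retune pll_x off-line */
	if (!ret)
		ret = clk_set_parent(cpu, pll_x); /* and switch back */
out:
	clk_disable_unprepare(pll_x);		/* drop the extra reference */
	return ret;
}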
+ */ + clk_prepare_enable(pll_x_clk); + + ret = clk_set_parent(cpu_clk, pll_p_clk); + if (ret) + clk_disable_unprepare(pll_x_clk); + else + pll_x_prepared = true; + + return ret; +} + +static int tegra_target(struct cpufreq_policy *policy, unsigned int index) +{ + unsigned long rate = freq_table[index].frequency; + unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000; + int ret = 0; + + /* + * Vote on memory bus frequency based on cpu frequency + * This sets the minimum frequency, display or avp may request higher + */ + if (rate >= 816000) + clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ + else if (rate >= 456000) + clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ + else + clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ + + /* + * target freq == pll_p, don't need to take extra reference to pll_x_clk + * as it isn't used anymore. + */ + if (rate == ifreq) + return clk_set_parent(cpu_clk, pll_p_clk); + + ret = clk_set_rate(pll_x_clk, rate * 1000); + /* Restore to earlier frequency on error, i.e. pll_x */ + if (ret) + pr_err("Failed to change pll_x to %lu\n", rate); + + ret = clk_set_parent(cpu_clk, pll_x_clk); + /* This shouldn't fail while changing or restoring */ + WARN_ON(ret); + + /* + * Drop count to pll_x clock only if we switched to intermediate freq + * earlier while transitioning to a target frequency. + */ + if (pll_x_prepared) { + clk_disable_unprepare(pll_x_clk); + pll_x_prepared = false; + } + + return ret; +} + +static int tegra_cpu_init(struct cpufreq_policy *policy) +{ + int ret; + + if (policy->cpu >= NUM_CPUS) + return -EINVAL; + + clk_prepare_enable(emc_clk); + clk_prepare_enable(cpu_clk); + + /* FIXME: what's the actual transition time? */ + ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); + if (ret) { + clk_disable_unprepare(cpu_clk); + clk_disable_unprepare(emc_clk); + return ret; + } + + policy->clk = cpu_clk; + policy->suspend_freq = freq_table[0].frequency; + return 0; +} + +static int tegra_cpu_exit(struct cpufreq_policy *policy) +{ + clk_disable_unprepare(cpu_clk); + clk_disable_unprepare(emc_clk); + return 0; +} + +static struct cpufreq_driver tegra_cpufreq_driver = { + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .get_intermediate = tegra_get_intermediate, + .target_intermediate = tegra_target_intermediate, + .target_index = tegra_target, + .get = cpufreq_generic_get, + .init = tegra_cpu_init, + .exit = tegra_cpu_exit, + .name = "tegra", + .attr = cpufreq_generic_attr, +#ifdef CONFIG_PM + .suspend = cpufreq_generic_suspend, +#endif +}; + +static int __init tegra_cpufreq_init(void) +{ + cpu_clk = clk_get_sys(NULL, "cclk"); + if (IS_ERR(cpu_clk)) + return PTR_ERR(cpu_clk); + + pll_x_clk = clk_get_sys(NULL, "pll_x"); + if (IS_ERR(pll_x_clk)) + return PTR_ERR(pll_x_clk); + + pll_p_clk = clk_get_sys(NULL, "pll_p"); + if (IS_ERR(pll_p_clk)) + return PTR_ERR(pll_p_clk); + + emc_clk = clk_get_sys("cpu", "emc"); + if (IS_ERR(emc_clk)) { + clk_put(cpu_clk); + return PTR_ERR(emc_clk); + } + + return cpufreq_register_driver(&tegra_cpufreq_driver); +} + +static void __exit tegra_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&tegra_cpufreq_driver); + clk_put(emc_clk); + clk_put(cpu_clk); +} + + +MODULE_AUTHOR("Colin Cross "); +MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); +MODULE_LICENSE("GPL"); +module_init(tegra_cpufreq_init); +module_exit(tegra_cpufreq_exit); diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 7936dce4b..344058f85 100644 --- 
a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -176,18 +176,38 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) /** * cpuidle_state_is_coupled - check if a state is part of a coupled set - * @dev: struct cpuidle_device for the current cpu * @drv: struct cpuidle_driver for the platform * @state: index of the target state in drv->states * * Returns true if the target state is coupled with cpus besides this one */ -bool cpuidle_state_is_coupled(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int state) +bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state) { return drv->states[state].flags & CPUIDLE_FLAG_COUPLED; } +/** + * cpuidle_coupled_state_verify - check if the coupled states are correctly set. + * @drv: struct cpuidle_driver for the platform + * + * Returns 0 for valid state values, a negative error code otherwise: + * * -EINVAL if any coupled state(safe_state_index) is wrongly set. + */ +int cpuidle_coupled_state_verify(struct cpuidle_driver *drv) +{ + int i; + + for (i = drv->state_count - 1; i >= 0; i--) { + if (cpuidle_state_is_coupled(drv, i) && + (drv->safe_state_index == i || + drv->safe_state_index < 0 || + drv->safe_state_index >= drv->state_count)) + return -EINVAL; + } + + return 0; +} + /** * cpuidle_coupled_set_ready - mark a cpu as ready * @coupled: the struct coupled that contains the current cpu @@ -473,7 +493,7 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev, return entered_state; } entered_state = cpuidle_enter_state(dev, drv, - dev->safe_state_index); + drv->safe_state_index); local_irq_disable(); } @@ -521,7 +541,7 @@ retry: } entered_state = cpuidle_enter_state(dev, drv, - dev->safe_state_index); + drv->safe_state_index); local_irq_disable(); } diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index c13feec89..ea9728fde 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c @@ -25,16 +25,21 @@ #include #include #include +#include + #include #include -#include + +#include + +#define CALXEDA_IDLE_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int calxeda_idle_finish(unsigned long val) { - const struct psci_power_state ps = { - .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, - }; - return psci_ops.cpu_suspend(ps, __pa(cpu_resume)); + return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume)); } static int calxeda_pwrdown_idle(struct cpuidle_device *dev, diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 48b722856..17a6dc0e2 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -123,6 +123,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, * cpuidle mechanism enables interrupts and doing that with timekeeping * suspended is generally unsafe. */ + stop_critical_timings(); drv->states[index].enter_freeze(dev, drv, index); WARN_ON(!irqs_disabled()); /* @@ -131,6 +132,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, * critical sections, so tell RCU about that. 
*/ RCU_NONIDLE(tick_unfreeze()); + start_critical_timings(); } /** @@ -195,7 +197,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ktime_get(); + stop_critical_timings(); entered_state = target_state->enter(dev, drv, index); + start_critical_timings(); time_end = ktime_get(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); @@ -210,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, tick_broadcast_exit(); } - if (!cpuidle_state_is_coupled(dev, drv, entered_state)) + if (!cpuidle_state_is_coupled(drv, entered_state)) local_irq_enable(); diff = ktime_to_us(ktime_sub(time_end, time_start)); @@ -259,7 +263,7 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { - if (cpuidle_state_is_coupled(dev, drv, index)) + if (cpuidle_state_is_coupled(drv, index)) return cpuidle_enter_state_coupled(dev, drv, index); return cpuidle_enter_state(dev, drv, index); } diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index ee97e9672..f87f399b0 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -34,19 +34,24 @@ extern int cpuidle_add_sysfs(struct cpuidle_device *dev); extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED -bool cpuidle_state_is_coupled(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int state); +bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state); +int cpuidle_coupled_state_verify(struct cpuidle_driver *drv); int cpuidle_enter_state_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int next_state); int cpuidle_coupled_register_device(struct cpuidle_device *dev); void cpuidle_coupled_unregister_device(struct cpuidle_device *dev); #else -static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int state) +static inline +bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state) { return false; } +static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv) +{ + return 0; +} + static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int next_state) { diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 5db147859..389ade457 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) if (!drv || !drv->state_count) return -EINVAL; + ret = cpuidle_coupled_state_verify(drv); + if (ret) + return ret; + if (cpuidle_disabled()) return -ENODEV; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4044125fb..d23471906 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -461,7 +461,7 @@ config CRYPTO_DEV_QCE config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" - depends on PPC64 + depends on PPC64 && VSX help Support for VMX cryptographic acceleration instructions. @@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256 hashing algorithms. 
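Returning briefly to the cpuidle change above: cpuidle_coupled_state_verify() rejects a driver whose safe_state_index is out of range or is itself a coupled state, since the safe state is what a CPU falls back to while its partners catch up. A user-space rendition of the same check (structures trimmed to the fields the check reads):

#include <stdio.h>

#define CPUIDLE_FLAG_COUPLED	0x1

struct toy_state { unsigned int flags; };

struct toy_driver {
	struct toy_state states[4];
	int state_count;
	int safe_state_index;
};

static int verify(const struct toy_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (!(drv->states[i].flags & CPUIDLE_FLAG_COUPLED))
			continue;
		if (drv->safe_state_index == i ||
		    drv->safe_state_index < 0 ||
		    drv->safe_state_index >= drv->state_count)
			return -1;	/* -EINVAL in the kernel */
	}
	return 0;
}

int main(void)
{
	struct toy_driver drv = {
		.states = { { 0 }, { CPUIDLE_FLAG_COUPLED } },
		.state_count = 2,
		.safe_state_index = 0,	/* state 0 is a plain, safe fallback */
	};

	printf("verify: %d\n", verify(&drv));	/* prints 0 */
	return 0;
}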
+config CRYPTO_DEV_SUN4I_SS + tristate "Support for Allwinner Security System cryptographic accelerator" + depends on ARCH_SUNXI + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_BLKCIPHER + help + Some Allwinner SoCs have a crypto accelerator named + Security System. Select this if you want to use it. + The Security System handles AES/DES/3DES ciphers in CBC mode + and the SHA1 and MD5 hash algorithms. + + To compile this driver as a module, choose M here: the module + will be called sun4i-ss. + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index e35c07a8d..c3ced6fbd 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ +obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 3b28e8c3d..192a8fa32 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) struct device *dev = (struct device *)data; struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); - if (core_dev->dev->ce_base == 0) + if (!core_dev->dev->ce_base) return 0; writel(PPC4XX_INTERRUPT_CLR, diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index d9af9403a..2f0b33375 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c @@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc, sg_init_table(ctx->bufsl, nsg); sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); if (nsg > 1) - scatterwalk_sg_chain(ctx->bufsl, nsg, - req->src); + sg_chain(ctx->bufsl, nsg, req->src); ctx->sg = ctx->bufsl; } else ctx->sg = req->src; diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e286e285a..5652a5341 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -1,6 +1,6 @@ config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore driver backend" - depends on FSL_SOC + depends on FSL_SOC || ARCH_MXC help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). @@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng.
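The bfin_crc hunk above swaps the old scatterwalk_sg_chain() wrapper for the generic sg_chain() without changing behaviour: a short head table carries the leftover bytes, and its last slot becomes a chain link to the request's own scatterlist. A sketch of the pattern (kernel-style C; the function name is illustrative):

#include <linux/scatterlist.h>

static void chain_leftover(struct scatterlist bufsl[2], void *buf,
			   unsigned int len, struct scatterlist *src)
{
	sg_init_table(bufsl, 2);
	sg_set_buf(&bufsl[0], buf, len);	/* leftover bytes come first */
	sg_chain(bufsl, 2, src);		/* bufsl[1] now links to src */
}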
+config CRYPTO_DEV_FSL_CAAM_IMX + def_bool SOC_IMX6 || SOC_IMX7D + depends on CRYPTO_DEV_FSL_CAAM + +config CRYPTO_DEV_FSL_CAAM_LE + def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A + depends on CRYPTO_DEV_FSL_CAAM + config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index daca933a8..ba79d638f 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -68,27 +68,29 @@ #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ CAAM_CMD_SZ * 4) +#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 5) /* length of descriptors text */ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ) /* Note: Nonce is counted in enckeylen */ -#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ) +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) @@ -111,6 +113,20 @@ #endif static struct list_head alg_list; +struct caam_alg_entry { + int class1_alg_type; + int class2_alg_type; + int alg_op; + bool rfc3686; + bool geniv; +}; + +struct caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + /* Set DK bit in class 1 operation if shared */ static inline void append_dec_op1(u32 *desc, u32 type) { @@ -144,18 +160,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type) KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); } -/* - * For aead encrypt and decrypt, read iv for both classes - */ -static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset) -{ - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - (ivoffset << LDST_OFFSET_SHIFT)); - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | - (ivoffset << MOVE_OFFSET_SHIFT) | ivsize); -} - /* * For ablkcipher encrypt and decrypt, read from req->src and * write to req->dst @@ -169,13 +173,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc) append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } -/* - * If all data, including src (with assoc and iv) or dst (with iv only) are - * contiguous - */ -#define GIV_SRC_CONTIG 1 -#define 
GIV_DST_CONTIG (1 << 1) - /* * per-session context */ @@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, static int aead_null_set_sh_desc(struct crypto_aead *aead) { - unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ - if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* old_aead_encrypt shared descriptor */ + /* aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; init_sh_desc(desc, HDR_SHARE_SERIAL); @@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) KEY_DEST_MDHA_SPLIT | KEY_ENC); set_jump_tgt_here(desc, key_jump_cmd); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* - * NULL encryption; IV is zero - * assoclen = (assoclen + cryptlen) - cryptlen - */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* read assoc before reading payload */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | - KEY_VLF); + /* assoclen + cryptlen = seqinlen */ + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); - /* Prepare to read and write cryptlen bytes */ + /* Prepare to read and write cryptlen + assoclen bytes */ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); @@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) desc = ctx->sh_desc_dec; - /* old_aead_decrypt shared descriptor */ + /* aead_decrypt shared descriptor */ init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip if already shared */ @@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + ivsize); - /* assoclen = (assoclen + cryptlen) - cryptlen */ + /* assoclen + cryptlen = seqoutlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); - /* read assoc before reading payload */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | - KEY_VLF); - - /* Prepare to read and write cryptlen bytes */ + /* Prepare to read and write cryptlen + assoclen bytes */ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); @@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) static int aead_set_sh_desc(struct crypto_aead *aead) { + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + struct caam_aead_alg, aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct crypto_tfm *ctfm = crypto_aead_tfm(aead); - const char *alg_name = crypto_tfm_alg_name(ctfm); struct device *jrdev = ctx->jrdev; bool keys_fit_inline; u32 geniv, moveiv; @@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) u32 *desc; const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == 
OP_ALG_AAI_CTR_MOD128); - const bool is_rfc3686 = (ctr_mode && - (strstr(alg_name, "rfc3686") != NULL)); - - if (!ctx->authsize) - return 0; + const bool is_rfc3686 = alg->caam.rfc3686; /* NULL encryption / decryption */ if (!ctx->enckeylen) @@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead) if (is_rfc3686) ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + if (alg->caam.geniv) + goto skip_enc; + /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN + ctx->split_key_pad_len + ctx->enckeylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* old_aead_encrypt shared descriptor */ + /* aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; /* Note: Context registers are saved. */ @@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); + /* Read and write assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | - KEY_VLF); - aead_append_ld_iv(desc, ivsize, ctx1_iv_off); + FIFOLDST_VLF); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) @@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Read and write cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); /* Write ICV */ @@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif +skip_enc: /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + + if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN + ctx->split_key_pad_len + ctx->enckeylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* old_aead_decrypt shared descriptor */ + /* aead_decrypt shared descriptor */ desc = ctx->sh_desc_dec; /* Note: Context registers are saved. 
*/ @@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + ivsize); - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + /* Read and write assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); - aead_append_ld_iv(desc, ivsize, ctx1_iv_off); - /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | @@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_dec_op1(desc, ctx->class1_alg_type); /* Read and write cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG); /* Load ICV */ @@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif + if (!alg->caam.geniv) + goto skip_givenc; + /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + + if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN + ctx->split_key_pad_len + ctx->enckeylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= CAAM_DESC_BYTES_MAX) @@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Note: Context registers are saved. 
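Under the new AEAD layout both sequences begin with the associated data (src = assoc || ciphertext || ICV, dst = assoc || plaintext), which is why the rewritten decrypt descriptor copies assoclen bytes through up front and derives the payload length from SEQOUTLEN instead of subtracting ivsize and authsize. A toy calculation of that bookkeeping, under the assumed layout:

#include <stdio.h>

int main(void)
{
        /* sample authenc decrypt request; cryptlen includes the ICV */
        unsigned int assoclen = 16, cryptlen = 96, authsize = 20;
        unsigned int seqinlen  = assoclen + cryptlen;             /* A||C||ICV */
        unsigned int seqoutlen = assoclen + cryptlen - authsize;  /* A||P */

        /* after the assoclen bytes have been copied through: */
        printf("payload to read: %u, to write: %u\n",
               seqinlen - assoclen, seqoutlen - assoclen);
        return 0;
}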
*/ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); + if (is_rfc3686) + goto copy_iv; + /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | @@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); +copy_iv: /* Copy IV to class 1 context */ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | @@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* ivsize + cryptlen = seqoutlen - authsize */ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - /* assoclen = seqinlen - (ivsize + cryptlen) */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + /* Read and write assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | @@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { dev_err(jrdev, "unable to map shared descriptor\n"); return -ENOMEM; @@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif +skip_givenc: return 0; } @@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* Skip assoc data */ - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); - /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - /* cryptlen = seqoutlen - assoclen */ - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + /* Skip IV */ + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); /* Will read cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); + /* Write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* Skip assoc data */ - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); - /* Read assoc data */ 
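Worth noting in the hunk above: the givencrypt descriptor is now mapped into ctx->sh_desc_enc_dma, yet the unchanged error check that follows still inspects ctx->sh_desc_givenc_dma. The conventional DMA API pairing tests the handle that dma_map_single() just returned, roughly as below (kernel context; dma_map_single()/dma_mapping_error() as defined by the DMA mapping API):

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }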
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - /* Will write cryptlen bytes */ - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + /* Skip IV */ + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); /* Will read cryptlen bytes */ - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); + + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* Will write cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } -static void old_aead_unmap(struct device *dev, - struct aead_edesc *edesc, - struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - int ivsize = crypto_aead_ivsize(aead); - - dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, - DMA_TO_DEVICE, edesc->assoc_chained); - - caam_unmap(dev, req->src, req->dst, - edesc->src_nents, edesc->src_chained, edesc->dst_nents, - edesc->dst_chained, edesc->iv_dma, ivsize, - edesc->sec4_sg_dma, edesc->sec4_sg_bytes); -} - static void ablkcipher_unmap(struct device *dev, struct ablkcipher_edesc *edesc, struct ablkcipher_request *req) @@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_request_complete(req, err); } -static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, - void *context) -{ - struct aead_request *req = context; - struct aead_edesc *edesc; -#ifdef DEBUG - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ivsize = crypto_aead_ivsize(aead); - - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); -#endif - - edesc = (struct aead_edesc *)((char *)desc - - offsetof(struct aead_edesc, hw_desc)); - - if (err) - caam_jr_strstatus(jrdev, err); - - old_aead_unmap(jrdev, edesc, req); - -#ifdef DEBUG - print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), - req->assoclen , 1); - print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, - edesc->src_nents ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents ? 
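The rfc4106 descriptors above assume that req->assoclen covers the AAD plus the 8-byte IV carried at its tail, so the job reads assoclen - 8 bytes of AAD, then skips the IV (already loaded into context); the extra one-entry FIFO load wedged between the two skips is the stated workaround for erratum A-005473. A standalone model of the length split:

#include <stdio.h>

int main(void)
{
        /* rfc4106: req->assoclen = AAD length + 8-byte IV */
        unsigned int assoclen = 24, cryptlen = 64;
        unsigned int aad = assoclen - 8;        /* VARSEQINLEN = REG3 - 8 */

        printf("read %u AAD bytes, skip 8 IV bytes, handle %u payload bytes\n",
               aad, cryptlen);
        return 0;
}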
100 : req->cryptlen + - ctx->authsize + 4, 1); -#endif - - kfree(edesc); - - aead_request_complete(req, err); -} - static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { @@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_request_complete(req, err); } -static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, - void *context) -{ - struct aead_request *req = context; - struct aead_edesc *edesc; -#ifdef DEBUG - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ivsize = crypto_aead_ivsize(aead); - - dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); -#endif - - edesc = (struct aead_edesc *)((char *)desc - - offsetof(struct aead_edesc, hw_desc)); - -#ifdef DEBUG - print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, - ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), - req->cryptlen - ctx->authsize, 1); -#endif - - if (err) - caam_jr_strstatus(jrdev, err); - - old_aead_unmap(jrdev, edesc, req); - - /* - * verify hw auth check passed else return -EBADMSG - */ - if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) - err = -EBADMSG; - -#ifdef DEBUG - print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, - ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), - sizeof(struct iphdr) + req->assoclen + - ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + - ctx->authsize + 36, 1); - if (!err && edesc->sec4_sg_bytes) { - struct scatterlist *sg = sg_last(req->src, edesc->src_nents); - print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), - sg->length + ctx->authsize + 16, 1); - } -#endif - - kfree(edesc); - - aead_request_complete(req, err); -} - static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { @@ -2032,91 +1918,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ablkcipher_request_complete(req, err); } -/* - * Fill in aead job descriptor - */ -static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr, - struct aead_edesc *edesc, - struct aead_request *req, - bool all_contig, bool encrypt) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ivsize = crypto_aead_ivsize(aead); - int authsize = ctx->authsize; - u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; - bool is_gcm = false; - -#ifdef DEBUG - debug("assoclen %d cryptlen %d authsize %d\n", - req->assoclen, req->cryptlen, authsize); - print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), - req->assoclen , 1); - print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, - edesc->src_nents ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents ? 
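The deleted old_aead_decrypt_done() shows the one piece of logic its surviving counterpart keeps: a CCB ICV-check error reported by the job ring is folded into -EBADMSG so callers see an ordinary authentication failure. A standalone model; the mask and error-ID values here are assumptions standing in for the regs.h definitions.

#include <stdio.h>

#define EBADMSG                         74              /* asm-generic errno */
#define JRSTA_CCBERR_ERRID_MASK         0x000000ff      /* assumed */
#define JRSTA_CCBERR_ERRID_ICVCHK       0x0000000a      /* assumed */

static int jr_status_to_errno(unsigned int err)
{
        if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                return -EBADMSG;        /* ICV mismatch: bad auth tag */
        return err ? -5 /* -EIO */ : 0;
}

int main(void)
{
        printf("%d\n", jr_status_to_errno(JRSTA_CCBERR_ERRID_ICVCHK));
        return 0;
}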
100 : req->cryptlen, 1); - print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, - desc_bytes(sh_desc), 1); -#endif - - if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == - OP_ALG_ALGSEL_AES) && - ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) - is_gcm = true; - - len = desc_len(sh_desc); - init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - - if (all_contig) { - if (is_gcm) - src_dma = edesc->iv_dma; - else - src_dma = sg_dma_address(req->assoc); - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + - (edesc->src_nents ? : 1); - in_options = LDST_SGF; - } - - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, - in_options); - - if (likely(req->src == req->dst)) { - if (all_contig) { - dst_dma = sg_dma_address(req->src); - } else { - dst_dma = src_dma + sizeof(struct sec4_sg_entry) * - ((edesc->assoc_nents ? : 1) + 1); - out_options = LDST_SGF; - } - } else { - if (!edesc->dst_nents) { - dst_dma = sg_dma_address(req->dst); - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } - } - if (encrypt) - append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, - out_options); - else - append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, - out_options); -} - /* * Fill in aead job descriptor */ @@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req, /* End of blank commands */ } -/* - * Fill in aead givencrypt job descriptor - */ -static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, - struct aead_edesc *edesc, - struct aead_request *req, - int contig) +static void init_authenc_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + struct caam_aead_alg, aead); + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ivsize = crypto_aead_ivsize(aead); - int authsize = ctx->authsize; + const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; - bool is_gcm = false; + u32 ivoffset = 0; -#ifdef DEBUG - debug("assoclen %d cryptlen %d authsize %d\n", - req->assoclen, req->cryptlen, authsize); - print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), - req->assoclen , 1); - print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); - print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents > 1 ? 
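The removed old_init_aead_job() also documents the old length convention: the input pointer spanned assoclen + ivsize + cryptlen, and the output was cryptlen plus the ICV on encrypt or minus it on decrypt. Restated as a tiny standalone helper:

#include <stdio.h>

static unsigned int old_out_len(unsigned int cryptlen, unsigned int authsize,
                                int encrypt)
{
        /* encrypt appends the authsize-byte ICV, decrypt strips it */
        return encrypt ? cryptlen + authsize : cryptlen - authsize;
}

int main(void)
{
        printf("enc out %u, dec out %u\n",
               old_out_len(64, 16, 1), old_out_len(80, 16, 0));
        return 0;
}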
100 : req->cryptlen, 1); - print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, - desc_bytes(sh_desc), 1); -#endif + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ivoffset = 16; - if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == - OP_ALG_ALGSEL_AES) && - ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) - is_gcm = true; + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) + ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; - len = desc_len(sh_desc); - init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + init_aead_job(req, edesc, all_contig, encrypt); - if (contig & GIV_SRC_CONTIG) { - if (is_gcm) - src_dma = edesc->iv_dma; - else - src_dma = sg_dma_address(req->assoc); - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; - in_options = LDST_SGF; - } - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, - in_options); - - if (contig & GIV_DST_CONTIG) { - dst_dma = edesc->iv_dma; - } else { - if (likely(req->src == req->dst)) { - dst_dma = src_dma + sizeof(struct sec4_sg_entry) * - (edesc->assoc_nents + - (is_gcm ? 1 + edesc->src_nents : 0)); - out_options = LDST_SGF; - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } - } - - append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, - out_options); + if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt))) + append_load_as_imm(desc, req->iv, ivsize, + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ivoffset << LDST_OFFSET_SHIFT)); } /* @@ -2389,150 +2153,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); } -/* - * allocate and map the aead extended descriptor - */ -static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req, - int desc_bytes, - bool *all_contig_ptr, - bool encrypt) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct device *jrdev = ctx->jrdev; - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; - int assoc_nents, src_nents, dst_nents = 0; - struct aead_edesc *edesc; - dma_addr_t iv_dma = 0; - int sgc; - bool all_contig = true; - bool assoc_chained = false, src_chained = false, dst_chained = false; - int ivsize = crypto_aead_ivsize(aead); - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; - unsigned int authsize = ctx->authsize; - bool is_gcm = false; - - assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); - - if (unlikely(req->dst != req->src)) { - src_nents = sg_count(req->src, req->cryptlen, &src_chained); - dst_nents = sg_count(req->dst, - req->cryptlen + - (encrypt ? authsize : (-authsize)), - &dst_chained); - } else { - src_nents = sg_count(req->src, - req->cryptlen + - (encrypt ? authsize : 0), - &src_chained); - } - - sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_TO_DEVICE, assoc_chained); - if (likely(req->src == req->dst)) { - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, - DMA_BIDIRECTIONAL, src_chained); - } else { - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? 
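init_authenc_job() above picks where in CONTEXT1 the IV lands: offset 0 for CBC, 16 bytes for plain CTR (CONTEXT1[255:128] = IV), and 16 plus the 4-byte nonce for RFC3686. The same selection as a standalone helper:

#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE  4

static unsigned int iv_offset(int ctr_mode, int is_rfc3686)
{
        if (is_rfc3686)
                return 16 + CTR_RFC3686_NONCE_SIZE; /* {NONCE, IV, COUNTER} */
        if (ctr_mode)
                return 16;      /* CONTEXT1[255:128] = IV */
        return 0;               /* CBC: IV at the start of CONTEXT1 */
}

int main(void)
{
        printf("cbc=%u ctr=%u rfc3686=%u\n",
               iv_offset(0, 0), iv_offset(1, 0), iv_offset(1, 1));
        return 0;
}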
: 1, - DMA_TO_DEVICE, src_chained); - sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, - DMA_FROM_DEVICE, dst_chained); - } - - iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - return ERR_PTR(-ENOMEM); - } - - if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == - OP_ALG_ALGSEL_AES) && - ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) - is_gcm = true; - - /* - * Check if data are contiguous. - * GCM expected input sequence: IV, AAD, text - * All other - expected input sequence: AAD, IV, text - */ - if (is_gcm) - all_contig = (!assoc_nents && - iv_dma + ivsize == sg_dma_address(req->assoc) && - !src_nents && sg_dma_address(req->assoc) + - req->assoclen == sg_dma_address(req->src)); - else - all_contig = (!assoc_nents && sg_dma_address(req->assoc) + - req->assoclen == iv_dma && !src_nents && - iv_dma + ivsize == sg_dma_address(req->src)); - if (!all_contig) { - assoc_nents = assoc_nents ? : 1; - src_nents = src_nents ? : 1; - sec4_sg_len = assoc_nents + 1 + src_nents; - } - - sec4_sg_len += dst_nents; - - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); - - /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); - if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); - return ERR_PTR(-ENOMEM); - } - - edesc->assoc_nents = assoc_nents; - edesc->assoc_chained = assoc_chained; - edesc->src_nents = src_nents; - edesc->src_chained = src_chained; - edesc->dst_nents = dst_nents; - edesc->dst_chained = dst_chained; - edesc->iv_dma = iv_dma; - edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + - desc_bytes; - *all_contig_ptr = all_contig; - - sec4_sg_index = 0; - if (!all_contig) { - if (!is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, - iv_dma, ivsize, 0); - sec4_sg_index += 1; - - if (is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - sg_to_sec4_sg_last(req->src, - src_nents, - edesc->sec4_sg + - sec4_sg_index, 0); - sec4_sg_index += src_nents; - } - if (dst_nents) { - sg_to_sec4_sg_last(req->dst, dst_nents, - edesc->sec4_sg + sec4_sg_index, 0); - } - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - return ERR_PTR(-ENOMEM); - } - - return edesc; -} - /* * allocate and map the aead extended descriptor */ @@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req) return ret; } -static int old_aead_encrypt(struct aead_request *req) +static int ipsec_gcm_encrypt(struct aead_request *req) +{ + if (req->assoclen < 8) + return -EINVAL; + + 
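The removed allocator also shows how the link table was sized in the non-contiguous case: one entry per assoc segment, one for the IV, then the src (and dst) segments, each a fixed-size sec4_sg_entry. A standalone model; the 16-byte entry layout is an assumption matching the driver's struct.

#include <stdint.h>
#include <stdio.h>

/* assumed layout of one CAAM S/G entry (16 bytes) */
struct sec4_sg_entry {
        uint64_t ptr;
        uint32_t len;
        uint8_t  rsvd;
        uint8_t  bpid;
        uint16_t offset;
};

int main(void)
{
        /* non-contiguous case: assoc entries + 1 IV entry + src entries */
        unsigned int assoc_nents = 2, src_nents = 3, dst_nents = 0;
        unsigned int sec4_sg_len = assoc_nents + 1 + src_nents + dst_nents;

        printf("link table: %zu bytes\n",
               sec4_sg_len * sizeof(struct sec4_sg_entry));
        return 0;
}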
return gcm_encrypt(req); +} + +static int aead_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, true); + edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, + &all_contig, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ - old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, - all_contig, true); + init_authenc_job(req, edesc, all_contig, true); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - old_aead_unmap(jrdev, edesc, req); + aead_unmap(jrdev, edesc, req); kfree(edesc); } @@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req) return ret; } -static int old_aead_decrypt(struct aead_request *req) +static int ipsec_gcm_decrypt(struct aead_request *req) +{ + if (req->assoclen < 8) + return -EINVAL; + + return gcm_decrypt(req); +} + +static int aead_decrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, false); + edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, + &all_contig, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - req->cryptlen, 1); + req->assoclen + req->cryptlen, 1); #endif /* Create and submit job descriptor*/ - old_init_aead_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, all_contig, false); + init_authenc_job(req, edesc, all_contig, false); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2789,49 +2423,58 @@ static int old_aead_decrypt(struct aead_request *req) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - old_aead_unmap(jrdev, edesc, req); + aead_unmap(jrdev, edesc, req); kfree(edesc); } return ret; } +static int aead_givdecrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int ivsize = crypto_aead_ivsize(aead); + + if (req->cryptlen < ivsize) + return -EINVAL; + + req->cryptlen -= ivsize; + req->assoclen += ivsize; + + return aead_decrypt(req); +} + /* - * allocate and map the aead extended descriptor for aead givencrypt + * allocate and map the ablkcipher extended descriptor for ablkcipher */ -static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request - *greq, int desc_bytes, - u32 *contig_ptr) +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request + *req, int desc_bytes, + bool *iv_contig_out) { - struct aead_request *req = &greq->areq; - struct crypto_aead *aead = 
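Two small request fix-ups appear above: ipsec_gcm_{en,de}crypt() rejects assoclen < 8 because rfc4106 keeps its 8-byte IV inside the associated data, and aead_givdecrypt() handles echainiv templates by folding the leading IV into the AAD before taking the normal decrypt path. A standalone model of the latter:

#include <stdio.h>

struct req { unsigned int assoclen, cryptlen; };

/* mirrors aead_givdecrypt(): the IV travels at the front of the payload,
 * so decrypt authenticates it as AAD rather than deciphering it */
static int givdecrypt_fixup(struct req *r, unsigned int ivsize)
{
        if (r->cryptlen < ivsize)
                return -1;              /* -EINVAL in the driver */
        r->cryptlen -= ivsize;
        r->assoclen += ivsize;
        return 0;
}

int main(void)
{
        struct req r = { .assoclen = 16, .cryptlen = 80 };

        if (!givdecrypt_fixup(&r, 16))
                printf("assoclen=%u cryptlen=%u\n", r.assoclen, r.cryptlen);
        return 0;
}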
crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; - int assoc_nents, src_nents, dst_nents = 0; - struct aead_edesc *edesc; + CRYPTO_TFM_REQ_MAY_SLEEP)) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0, sec4_sg_bytes; + struct ablkcipher_edesc *edesc; dma_addr_t iv_dma = 0; + bool iv_contig = false; int sgc; - u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; - int ivsize = crypto_aead_ivsize(aead); - bool assoc_chained = false, src_chained = false, dst_chained = false; - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; - bool is_gcm = false; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + bool src_chained = false, dst_chained = false; + int sec4_sg_index; - assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); - src_nents = sg_count(req->src, req->cryptlen, &src_chained); + src_nents = sg_count(req->src, req->nbytes, &src_chained); - if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, - &dst_chained); + if (req->dst != req->src) + dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); - sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, - DMA_TO_DEVICE, assoc_chained); if (likely(req->src == req->dst)) { sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_BIDIRECTIONAL, src_chained); @@ -2842,121 +2485,52 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request DMA_FROM_DEVICE, dst_chained); } - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, iv_dma)) { dev_err(jrdev, "unable to map IV\n"); return ERR_PTR(-ENOMEM); } - if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == - OP_ALG_ALGSEL_AES) && - ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) - is_gcm = true; - /* - * Check if data are contiguous. - * GCM expected input sequence: IV, AAD, text - * All other - expected input sequence: AAD, IV, text + * Check if iv can be contiguous with source and destination. + * If so, include it. If not, create scatterlist. */ - - if (is_gcm) { - if (assoc_nents || iv_dma + ivsize != - sg_dma_address(req->assoc) || src_nents || - sg_dma_address(req->assoc) + req->assoclen != - sg_dma_address(req->src)) - contig &= ~GIV_SRC_CONTIG; - } else { - if (assoc_nents || - sg_dma_address(req->assoc) + req->assoclen != iv_dma || - src_nents || iv_dma + ivsize != sg_dma_address(req->src)) - contig &= ~GIV_SRC_CONTIG; - } - - if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) - contig &= ~GIV_DST_CONTIG; - - if (!(contig & GIV_SRC_CONTIG)) { - assoc_nents = assoc_nents ? : 1; + if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) + iv_contig = true; + else src_nents = src_nents ? : 1; - sec4_sg_len += assoc_nents + 1 + src_nents; - if (req->src == req->dst && - (src_nents || iv_dma + ivsize != sg_dma_address(req->src))) - contig &= ~GIV_DST_CONTIG; - } - - /* - * Add new sg entries for GCM output sequence. - * Expected output sequence: IV, encrypted text. - */ - if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) - sec4_sg_len += 1 + src_nents; - - if (unlikely(req->src != req->dst)) { - dst_nents = dst_nents ? 
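ablkcipher_edesc_alloc() only spends a link-table entry on the IV when it is not already physically contiguous with a single-segment source; src_nents == 0 is how sg_count() reports one segment here. A standalone model of the test:

#include <stdint.h>
#include <stdio.h>

/* mirrors the iv_contig test: the IV rides along for free only when it
 * ends exactly where the lone source segment begins */
static int iv_contiguous(uint64_t iv_dma, unsigned int ivsize,
                         uint64_t src_dma, int src_nents)
{
        return src_nents == 0 && iv_dma + ivsize == src_dma;
}

int main(void)
{
        printf("%d %d\n",
               iv_contiguous(0x1000, 16, 0x1010, 0),    /* contiguous */
               iv_contiguous(0x1000, 16, 0x2000, 0));   /* needs S/G entry */
        return 0;
}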
: 1; - sec4_sg_len += 1 + dst_nents; - } - - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * + sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); } - edesc->assoc_nents = assoc_nents; - edesc->assoc_chained = assoc_chained; edesc->src_nents = src_nents; edesc->src_chained = src_chained; edesc->dst_nents = dst_nents; edesc->dst_chained = dst_chained; - edesc->iv_dma = iv_dma; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; - *contig_ptr = contig; sec4_sg_index = 0; - if (!(contig & GIV_SRC_CONTIG)) { - if (!is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, - iv_dma, ivsize, 0); - sec4_sg_index += 1; - - if (is_gcm) { - sg_to_sec4_sg_len(req->assoc, req->assoclen, - edesc->sec4_sg + sec4_sg_index); - sec4_sg_index += assoc_nents; - } - - sg_to_sec4_sg_last(req->src, src_nents, - edesc->sec4_sg + - sec4_sg_index, 0); - sec4_sg_index += src_nents; - } - - if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) { - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, - iv_dma, ivsize, 0); - sec4_sg_index += 1; + if (!iv_contig) { + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); sg_to_sec4_sg_last(req->src, src_nents, - edesc->sec4_sg + sec4_sg_index, 0); + edesc->sec4_sg + 1, 0); + sec4_sg_index += 1 + src_nents; } - if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, - iv_dma, ivsize, 0); - sec4_sg_index += 1; + if (dst_nents) { sg_to_sec4_sg_last(req->dst, dst_nents, - edesc->sec4_sg + sec4_sg_index, 0); + edesc->sec4_sg + sec4_sg_index, 0); } + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { @@ -2964,201 +2538,58 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request return ERR_PTR(-ENOMEM); } + edesc->iv_dma = iv_dma; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + sec4_sg_bytes, 1); +#endif + + *iv_contig_out = iv_contig; return edesc; } -static int old_aead_givencrypt(struct aead_givcrypt_request *areq) +static int ablkcipher_encrypt(struct ablkcipher_request *req) { - struct aead_request *req = &areq->areq; - struct aead_edesc *edesc; - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - u32 contig; + bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &contig); - + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); if (IS_ERR(edesc)) return PTR_ERR(edesc); -#ifdef DEBUG - 
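The kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, ...) idiom above puts the hardware descriptor and the S/G link table in one zeroed allocation behind the edesc, with sec4_sg located by pointer arithmetic. A userspace model of that layout; the field names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* [ edesc struct | hw job descriptor | S/G link table ] in one allocation */
struct edesc {
        int nents;
        void *sec4_sg;
        unsigned char hw_desc[];        /* trailing storage starts here */
};

int main(void)
{
        size_t desc_bytes = 64, sec4_sg_bytes = 48;
        struct edesc *e = calloc(1, sizeof(*e) + desc_bytes + sec4_sg_bytes);

        if (!e)
                return 1;
        /* the link table lives right after the hw descriptor */
        e->sec4_sg = (char *)e + sizeof(*e) + desc_bytes;
        printf("edesc at %p, sec4_sg at %p\n", (void *)e, e->sec4_sg);
        free(e);
        return 0;
}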
print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - req->cryptlen, 1); -#endif - /* Create and submit job descriptor*/ - init_aead_giv_job(ctx->sh_desc_givenc, - ctx->sh_desc_givenc_dma, edesc, req, contig); + init_ablkcipher_job(ctx->sh_desc_enc, + ctx->sh_desc_enc_dma, edesc, req, iv_contig); #ifdef DEBUG - print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); #endif - desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); + if (!ret) { ret = -EINPROGRESS; } else { - old_aead_unmap(jrdev, edesc, req); + ablkcipher_unmap(jrdev, edesc, req); kfree(edesc); } return ret; } -static int aead_null_givencrypt(struct aead_givcrypt_request *areq) -{ - return old_aead_encrypt(&areq->areq); -} - -/* - * allocate and map the ablkcipher extended descriptor for ablkcipher - */ -static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request - *req, int desc_bytes, - bool *iv_contig_out) +static int ablkcipher_decrypt(struct ablkcipher_request *req) { - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - struct device *jrdev = ctx->jrdev; - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP)) ? - GFP_KERNEL : GFP_ATOMIC; - int src_nents, dst_nents = 0, sec4_sg_bytes; - struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool iv_contig = false; - int sgc; - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - bool src_chained = false, dst_chained = false; - int sec4_sg_index; - - src_nents = sg_count(req->src, req->nbytes, &src_chained); - - if (req->dst != req->src) - dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); - - if (likely(req->src == req->dst)) { - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, - DMA_BIDIRECTIONAL, src_chained); - } else { - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, - DMA_TO_DEVICE, src_chained); - sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, - DMA_FROM_DEVICE, dst_chained); - } - - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - return ERR_PTR(-ENOMEM); - } - - /* - * Check if iv can be contiguous with source and destination. - * If so, include it. If not, create scatterlist. - */ - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) - iv_contig = true; - else - src_nents = src_nents ? : 1; - sec4_sg_bytes = ((iv_contig ? 
0 : 1) + src_nents + dst_nents) * - sizeof(struct sec4_sg_entry); - - /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); - if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); - return ERR_PTR(-ENOMEM); - } - - edesc->src_nents = src_nents; - edesc->src_chained = src_chained; - edesc->dst_nents = dst_nents; - edesc->dst_chained = dst_chained; - edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + - desc_bytes; - - sec4_sg_index = 0; - if (!iv_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, src_nents, - edesc->sec4_sg + 1, 0); - sec4_sg_index += 1 + src_nents; - } - - if (dst_nents) { - sg_to_sec4_sg_last(req->dst, dst_nents, - edesc->sec4_sg + sec4_sg_index, 0); - } - - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - return ERR_PTR(-ENOMEM); - } - - edesc->iv_dma = iv_dma; - -#ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, - sec4_sg_bytes, 1); -#endif - - *iv_contig_out = iv_contig; - return edesc; -} - -static int ablkcipher_encrypt(struct ablkcipher_request *req) -{ - struct ablkcipher_edesc *edesc; - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); - struct device *jrdev = ctx->jrdev; - bool iv_contig; - u32 *desc; - int ret = 0; - - /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); - if (IS_ERR(edesc)) - return PTR_ERR(edesc); - - /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_enc, - ctx->sh_desc_enc_dma, edesc, req, iv_contig); -#ifdef DEBUG - print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, - desc_bytes(edesc->hw_desc), 1); -#endif - desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); - - if (!ret) { - ret = -EINPROGRESS; - } else { - ablkcipher_unmap(jrdev, edesc, req); - kfree(edesc); - } - - return ret; -} - -static int ablkcipher_decrypt(struct ablkcipher_request *req) -{ - struct ablkcipher_edesc *edesc; + struct ablkcipher_edesc *edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; @@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(*edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -3347,7 +2778,6 @@ struct caam_alg_template { u32 type; union { struct ablkcipher_alg ablkcipher; - struct old_aead_alg aead; } template_u; u32 class1_alg_type; u32 class2_alg_type; @@ -3355,753 +2785,1426 @@ struct caam_alg_template { }; static struct caam_alg_template driver_algs[] = { - /* single-pass ipsec_esp descriptor */ - { - .name = "authenc(hmac(md5),ecb(cipher_null))", - .driver_name 
= "authenc-hmac-md5-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, - .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, - }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, - }, + /* ablkcipher descriptor */ { - .name = "authenc(hmac(sha1),ecb(cipher_null))", - .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, + .name = "cbc(aes)", + .driver_name = "cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { - .name = "authenc(hmac(sha224),ecb(cipher_null))", - .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, + .name = "cbc(des3_ede)", + .driver_name = "cbc-3des-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = SHA224_DIGEST_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { - .name = "authenc(hmac(sha256),ecb(cipher_null))", - .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, + .name = "cbc(des)", + .driver_name = "cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | 
OP_ALG_AAI_HMAC, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { - .name = "authenc(hmac(sha384),ecb(cipher_null))", - .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, - .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = SHA384_DIGEST_SIZE, + .name = "ctr(aes)", + .driver_name = "ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "chainiv", + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, }, { - .name = "authenc(hmac(sha512),ecb(cipher_null))", - .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam", - .blocksize = NULL_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = aead_null_givencrypt, + .name = "rfc3686(ctr(aes))", + .driver_name = "rfc3686-ctr-aes-caam", + .blocksize = 1, + .type = CRYPTO_ALG_TYPE_GIVCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .givencrypt = ablkcipher_givencrypt, .geniv = "", - .ivsize = NULL_IV_SIZE, - .maxauthsize = SHA512_DIGEST_SIZE, + .min_keysize = AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, }, - .class1_alg_type = 0, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, - }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, + } +}; + +static struct caam_aead_alg driver_aeads[] = { { - .name = "authenc(hmac(md5),cbc(aes))", - .driver_name = "authenc-hmac-md5-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = aead_setkey, - .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, }, { - .name = "authenc(hmac(sha1),cbc(aes))", - .driver_name = "authenc-hmac-sha1-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = 
rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + /* single-pass ipsec_esp descriptor */ + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-md5-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha1-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha224-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha256-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha384-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha512-" + 
"ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha224),cbc(aes))", - .driver_name = "authenc-hmac-sha224-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | 
OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha256),cbc(aes))", - .driver_name = "authenc-hmac-sha256-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha384),cbc(aes))", - .driver_name = "authenc-hmac-sha384-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + 
"hmac-sha384-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, - { - .name = "authenc(hmac(sha512),cbc(aes))", - .driver_name = "authenc-hmac-sha512-cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(md5),cbc(des3_ede))", - .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, 
+ .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + } }, { - .name = "authenc(hmac(sha1),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha224),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + 
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha256),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha384),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = 
OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha512),cbc(des3_ede))", - .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(md5),cbc(des))", - .driver_name = "authenc-hmac-md5-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha1),cbc(des))", - .driver_name = "authenc-hmac-sha1-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base 
= { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha224),cbc(des))", - .driver_name = "authenc-hmac-sha224-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha256),cbc(des))", - .driver_name = "authenc-hmac-sha256-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - 
.geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha384),cbc(des))", - .driver_name = "authenc-hmac-sha384-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, }, { - .name = "authenc(hmac(sha512),cbc(des))", - .driver_name = "authenc-hmac-sha512-cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = 
"echainiv(authenc(hmac(sha512)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .geniv = true, + }, }, { - .name = "authenc(hmac(md5),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, }, { - .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(md5),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, .ivsize = CTR_RFC3686_IV_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = 
old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, - .maxauthsize = SHA224_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_SHA224 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, }, { - .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha1),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, .ivsize = CTR_RFC3686_IV_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_SHA256 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, - .maxauthsize = SHA384_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_SHA384 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, }, { - .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", - .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha224),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = old_aead_encrypt, - .decrypt = old_aead_decrypt, - .givencrypt = old_aead_givencrypt, - .geniv = "", + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, .ivsize = CTR_RFC3686_IV_SIZE, - .maxauthsize = 
SHA512_DIGEST_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - .class2_alg_type = OP_ALG_ALGSEL_SHA512 | - OP_ALG_AAI_HMAC_PRECOMP, - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, - }, - /* ablkcipher descriptor */ - { - .name = "cbc(aes)", - .driver_name = "cbc-aes-caam", - .blocksize = AES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "", - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "cbc(des3_ede)", - .driver_name = "cbc-3des-caam", - .blocksize = DES3_EDE_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "", - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, }, { - .name = "cbc(des)", - .driver_name = "cbc-des-caam", - .blocksize = DES_BLOCK_SIZE, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "", - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha256)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, + }, }, { - .name = "ctr(aes)", - .driver_name = "ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .geniv = "chainiv", - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .aead = { + .base = { + .cra_name = 
"authenc(hmac(sha384)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - }, - { - .name = "rfc3686(ctr(aes))", - .driver_name = "rfc3686-ctr-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_GIVCIPHER, - .template_ablkcipher = { - .setkey = ablkcipher_setkey, - .encrypt = ablkcipher_encrypt, - .decrypt = ablkcipher_decrypt, - .givencrypt = ablkcipher_givencrypt, - .geniv = "", - .min_keysize = AES_MIN_KEY_SIZE + - CTR_RFC3686_NONCE_SIZE, - .max_keysize = AES_MAX_KEY_SIZE + - CTR_RFC3686_NONCE_SIZE, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, - } -}; - -struct caam_alg_entry { - int class1_alg_type; - int class2_alg_type; - int alg_op; -}; - -struct caam_aead_alg { - struct aead_alg aead; - struct caam_alg_entry caam; - bool registered; -}; - -static struct caam_aead_alg driver_aeads[] = { + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + }, + }, { .aead = { .base = { - .cra_name = "rfc4106(gcm(aes))", - .cra_driver_name = "rfc4106-gcm-aes-caam", + .cra_name = "seqiv(authenc(hmac(sha384)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, - .setkey = rfc4106_setkey, - .setauthsize = rfc4106_setauthsize, - .encrypt = gcm_encrypt, - .decrypt = gcm_decrypt, - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, }, }, { .aead = { .base = { - .cra_name = "rfc4543(gcm(aes))", - .cra_driver_name = "rfc4543-gcm-aes-caam", + .cra_name = "authenc(hmac(sha512)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, - .setkey = rfc4543_setkey, - .setauthsize = rfc4543_setauthsize, - .encrypt = gcm_encrypt, - .decrypt = gcm_decrypt, - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .rfc3686 = true, }, }, - /* Galois Counter Mode */ { .aead = { .base = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-caam", + .cra_name = "seqiv(authenc(hmac(sha512)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, - .setkey = gcm_setkey, - .setauthsize = gcm_setauthsize, - .encrypt = 
gcm_encrypt, - .decrypt = gcm_decrypt, - .ivsize = 12, - .maxauthsize = AES_BLOCK_SIZE, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_givdecrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .rfc3686 = true, + .geniv = true, }, }, }; @@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template struct caam_crypto_alg *t_alg; struct crypto_alg *alg; - t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); if (!t_alg) { pr_err("failed to allocate t_alg\n"); return ERR_PTR(-ENOMEM); @@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template alg->cra_type = &crypto_ablkcipher_type; alg->cra_ablkcipher = template->template_ablkcipher; break; - case CRYPTO_ALG_TYPE_AEAD: - alg->cra_type = &crypto_aead_type; - alg->cra_aead = template->template_aead; - break; } t_alg->caam.class1_alg_type = template->class1_alg_type; @@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void) struct device_node *dev_node; struct platform_device *pdev; struct device *ctrldev; - void *priv; + struct caam_drv_private *priv; int i = 0, err = 0; + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; + unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void) INIT_LIST_HEAD(&alg_list); - /* register crypto algorithms the device supports */ + /* + * Register crypto algorithms the device supports. + * First, detect presence and attributes of DES, AES, and MD blocks. 
+ */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/* If MD is present, limit digest size based on LP256 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-		/* TODO: check if h/w supports alg */
 		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
 
-		t_alg = caam_alg_alloc(&driver_algs[i]);
+		t_alg = caam_alg_alloc(alg);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
@@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void)
 
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if (alg_aai == OP_ALG_AAI_GCM)
+				continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
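An aside on the gating logic above: everything hangs off one read of the CHA instantiation word, from which per-block counts are pulled by mask and shift, and a template is registered only when every block it needs has a non-zero count. A minimal stand-alone sketch of that decode step, reusing the CHA_ID_LS_* shift/mask values from regs.h (the struct and function names here are invented for illustration, and the driver uses 64-bit-typed masks where this sketch uses 32-bit):

  /* Illustrative decode of the CHA instantiation word (cha_num_ls). */
  #include <stdint.h>

  #define CHA_ID_LS_AES_SHIFT	0
  #define CHA_ID_LS_AES_MASK	(0xfu << CHA_ID_LS_AES_SHIFT)
  #define CHA_ID_LS_DES_SHIFT	4
  #define CHA_ID_LS_DES_MASK	(0xfu << CHA_ID_LS_DES_SHIFT)
  #define CHA_ID_LS_MD_SHIFT	12
  #define CHA_ID_LS_MD_MASK	(0xfu << CHA_ID_LS_MD_SHIFT)

  struct cha_counts {		/* hypothetical holder, not in the driver */
  	unsigned int aes, des, md;
  };

  static void cha_decode(uint32_t cha_inst, struct cha_counts *c)
  {
  	/* each 4-bit field counts instantiated accelerators of one type */
  	c->aes = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
  	c->des = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
  	c->md  = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
  }

With the counts in hand, the skip conditions in the hunk are just zero tests, plus the LP256 special case that caps md_limit at SHA256_DIGEST_SIZE.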
+ */ + if (c2_alg_sel && + (!md_inst || (t_alg->aead.maxauthsize > md_limit))) + continue; caam_aead_alg_init(t_alg); diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f9c787519..94433b9fc 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -127,7 +127,7 @@ struct caam_hash_state { int buflen_0; u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen_1; - u8 caam_ctx[MAX_CTX_LEN]; + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); @@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req) state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, - *buflen, last_buflen); + *next_buflen, *buflen); if (src_nents) { src_map_to_sec4_sg(jrdev, req->src, src_nents, @@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req) sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req) sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + - DESC_JOB_IO_LEN, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) int sh_len; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc 
commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template, struct ahash_alg *halg; struct crypto_alg *alg; - t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); if (!t_alg) { pr_err("failed to allocate t_alg\n"); return ERR_PTR(-ENOMEM); @@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void) struct device_node *dev_node; struct platform_device *pdev; struct device *ctrldev; - void *priv; int i = 0, err = 0; + struct caam_drv_private *priv; + unsigned int md_limit = SHA512_DIGEST_SIZE; + u32 cha_inst, cha_vid; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void) if (!priv) return -ENODEV; + /* + * Register crypto algorithms the device supports. First, identify + * presence and attributes of MD block. + */ + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + + /* + * Skip registration of any hashing algorithms if MD block + * is not present. + */ + if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) + return -ENODEV; + + /* Limit digest size based on LP256 */ + if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) + md_limit = SHA256_DIGEST_SIZE; + INIT_LIST_HEAD(&hash_list); /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { - /* TODO: check if h/w supports alg */ struct caam_hash_alg *t_alg; + struct caam_hash_template *alg = driver_hash + i; + + /* If MD size is not supported by device, skip registration */ + if (alg->template_ahash.halg.digestsize > md_limit) + continue; /* register hmac version */ - t_alg = caam_hash_alloc(&driver_hash[i], true); + t_alg = caam_hash_alloc(alg, true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - pr_warn("%s alg allocation failed\n", - driver_hash[i].driver_name); + pr_warn("%s alg allocation failed\n", alg->driver_name); continue; } @@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void) list_add_tail(&t_alg->entry, &hash_list); /* register unkeyed version */ - t_alg = caam_hash_alloc(&driver_hash[i], false); + t_alg = caam_hash_alloc(alg, false); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - pr_warn("%s alg allocation failed\n", - driver_hash[i].driver_name); + pr_warn("%s alg allocation failed\n", alg->driver_name); continue; } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 509533720..9b92af2c7 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) atomic_set(&bd->empty, BUF_NOT_EMPTY); complete(&bd->filled); + + /* Buffer refilled, invalidate cache */ + dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); + #ifdef DEBUG print_hex_dump(KERN_ERR, 
"rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); @@ -311,7 +315,7 @@ static int __init caam_rng_init(void) struct device_node *dev_node; struct platform_device *pdev; struct device *ctrldev; - void *priv; + struct caam_drv_private *priv; int err; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); @@ -338,20 +342,32 @@ static int __init caam_rng_init(void) if (!priv) return -ENODEV; + /* Check for an instantiated RNG before registration */ + if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK)) + return -ENODEV; + dev = caam_jr_alloc(); if (IS_ERR(dev)) { pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(dev); } - rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); - if (!rng_ctx) - return -ENOMEM; + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA); + if (!rng_ctx) { + err = -ENOMEM; + goto free_caam_alloc; + } err = caam_init_rng(rng_ctx, dev); if (err) - return err; + goto free_rng_ctx; dev_info(dev, "registering rng-caam\n"); return hwrng_register(&caam_rng); + +free_rng_ctx: + kfree(rng_ctx); +free_caam_alloc: + caam_jr_free(dev); + return err; } module_init(caam_rng_init); diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index f57f395db..b6955ecdf 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index efacab753..8abb4bc54 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -15,6 +15,24 @@ #include "desc_constr.h" #include "error.h" +/* + * i.MX targets tend to have clock control subsystems that can + * enable/disable clocking to our device. + */ +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +static inline struct clk *caam_drv_identify_clk(struct device *dev, + char *clk_name) +{ + return devm_clk_get(dev, clk_name); +} +#else +static inline struct clk *caam_drv_identify_clk(struct device *dev, + char *clk_name) +{ + return NULL; +} +#endif + /* * Descriptor to instantiate RNG State Handle 0 in normal mode and * load the JDKEK, TDKEK and TDSK registers @@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, flags |= DECO_JQCR_FOUR; /* Instruct the DECO to execute it */ - wr_reg32(&deco->jr_ctl_hi, flags); + setbits32(&deco->jr_ctl_hi, flags); timeout = 10000000; do { @@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, { struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); struct caam_ctrl __iomem *ctrl; - u32 *desc, status, rdsta_val; + u32 *desc, status = 0, rdsta_val; int ret = 0, sh_idx; ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; @@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, * CAAM eras), then try again. 
*/ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; - if (status || !(rdsta_val & (1 << sh_idx))) + if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || + !(rdsta_val & (1 << sh_idx))) ret = -EAGAIN; if (ret) break; @@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev) struct device *ctrldev; struct caam_drv_private *ctrlpriv; struct caam_ctrl __iomem *ctrl; - int ring, ret = 0; + int ring; ctrldev = &pdev->dev; ctrlpriv = dev_get_drvdata(ctrldev); @@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev) /* Unmap controller region */ iounmap(ctrl); - return ret; + /* shut clocks off before finalizing shutdown */ + clk_disable_unprepare(ctrlpriv->caam_ipg); + clk_disable_unprepare(ctrlpriv->caam_mem); + clk_disable_unprepare(ctrlpriv->caam_aclk); + clk_disable_unprepare(ctrlpriv->caam_emi_slow); + + return 0; } /* @@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) int caam_get_era(void) { struct device_node *caam_node; - for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { - const uint32_t *prop = (uint32_t *)of_get_property(caam_node, - "fsl,sec-era", - NULL); - return prop ? *prop : -ENOTSUPP; - } + int ret; + u32 prop; - return -ENOTSUPP; + caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); + of_node_put(caam_node); + + return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop; } EXPORT_SYMBOL(caam_get_era); @@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev) struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; struct caam_drv_private *ctrlpriv; + struct clk *clk; #ifdef CONFIG_DEBUG_FS struct caam_perfmon *perfmon; #endif @@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev) int pg_size; int BLOCK_OFFSET = 0; - ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), - GFP_KERNEL); + ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); if (!ctrlpriv) return -ENOMEM; @@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev) ctrlpriv->pdev = pdev; nprop = pdev->dev.of_node; + /* Enable clocking */ + clk = caam_drv_identify_clk(&pdev->dev, "ipg"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, + "can't identify CAAM ipg clk: %d\n", ret); + return ret; + } + ctrlpriv->caam_ipg = clk; + + clk = caam_drv_identify_clk(&pdev->dev, "mem"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, + "can't identify CAAM mem clk: %d\n", ret); + return ret; + } + ctrlpriv->caam_mem = clk; + + clk = caam_drv_identify_clk(&pdev->dev, "aclk"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, + "can't identify CAAM aclk clk: %d\n", ret); + return ret; + } + ctrlpriv->caam_aclk = clk; + + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, + "can't identify CAAM emi_slow clk: %d\n", ret); + return ret; + } + ctrlpriv->caam_emi_slow = clk; + + ret = clk_prepare_enable(ctrlpriv->caam_ipg); + if (ret < 0) { + dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(ctrlpriv->caam_mem); + if (ret < 0) { + dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", + ret); + goto disable_caam_ipg; + } + + ret = clk_prepare_enable(ctrlpriv->caam_aclk); + if (ret < 0) { + dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret); + goto disable_caam_mem; + } + + ret = 
clk_prepare_enable(ctrlpriv->caam_emi_slow);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+			ret);
+		goto disable_caam_aclk;
+	}
+
 	/* Get configuration properties from device tree */
 	/* First, get register page */
 	ctrl = of_iomap(nprop, 0);
 	if (ctrl == NULL) {
 		dev_err(dev, "caam: of_iomap() failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto disable_caam_emi_slow;
 	}
 
 	/* Finding the page size for using the CTPR_MS register */
 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
-	setbits32(&ctrl->mcr, MCFGR_WDENABLE |
-		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+		      MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+				       MCFGR_LONG_PTR : 0));
 
 	/*
 	 * Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
 
-	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
-					sizeof(struct platform_device *) * rspec,
-					GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+					sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
-		iounmap(ctrl);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto iounmap_ctrl;
 	}
 
 	ring = 0;
@@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
 	/* If no QI and no rings specified, quit and go home */
 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 		dev_err(dev, "no queues configured, terminating\n");
-		caam_remove(pdev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto caam_remove;
 	}
 
 	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
 	} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 	if (ret) {
 		dev_err(dev, "failed to instantiate RNG");
-		caam_remove(pdev);
-		return ret;
+		goto caam_remove;
 	}
 	/*
 	 * Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
 			   &ctrlpriv->ctl_tdsk_wrap);
 #endif
 	return 0;
+
+caam_remove:
+	caam_remove(pdev);
+iounmap_ctrl:
+	iounmap(ctrl);
+disable_caam_emi_slow:
+	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+	clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	return ret;
 }
 
 static struct of_device_id caam_match[] = {
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index d397ff9d5..983d663ef 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,12 +8,29 @@
 #ifndef DESC_H
 #define DESC_H
 
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL-compile-time,
+ * and this selection is visible in the Compile Time Parameters Register + */ + +#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */ +#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */ +#define SEC4_SG_BPID_MASK 0x000000ff +#define SEC4_SG_BPID_SHIFT 16 +#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ +#define SEC4_SG_OFFS_MASK 0x00001fff + struct sec4_sg_entry { +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX + u32 rsvd1; + dma_addr_t ptr; +#else u64 ptr; -#define SEC4_SG_LEN_FIN 0x40000000 -#define SEC4_SG_LEN_EXT 0x80000000 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */ u32 len; - u8 reserved; + u8 rsvd2; u8 buf_pool_id; u16 offset; }; diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 9f79fd7bd..98d07de24 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -367,7 +367,7 @@ do { \ if (upper) \ append_u64(desc, data); \ else \ - append_u32(desc, data); \ + append_u32(desc, lower_32_bits(data)); \ } while (0) #define append_math_add_imm_u64(desc, dest, src0, src1, data) \ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 89b94cc9e..e2bcacc1a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -91,6 +91,11 @@ struct caam_drv_private { Handles of the RNG4 block are initialized by this driver */ + struct clk *caam_ipg; + struct clk *caam_mem; + struct clk *caam_aclk; + struct clk *caam_emi_slow; + /* * debugfs entries for developer view into driver/device * variables at runtime. diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b8b5d47ac..f7e0d8d4c 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg) userdesc = jrp->entinfo[sw_idx].desc_addr_virt; userstatus = jrp->outring[hw_idx].jrstatus; + /* + * Make sure all information from the job has been obtained + * before telling CAAM that the job has been removed from the + * output ring. + */ + mb(); + /* set done */ wr_reg32(&jrp->rregs->outring_rmvd, 1); @@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, jrp->inpring[jrp->inp_ring_write_index] = desc_dma; + /* + * Guarantee that the descriptor's DMA address has been written to + * the next slot in the ring before the write index is updated, since + * other cores may update this index independently. + */ smp_wmb(); jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & (JOBR_DEPTH - 1); jrp->head = (head + 1) & (JOBR_DEPTH - 1); + /* + * Ensure that all job information has been written before + * notifying CAAM that a new job was added to the input ring. 
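Both barriers added to the enqueue path encode the standard producer rule for descriptor rings: write the payload, order it before the index update that other observers poll, and order everything before the doorbell that wakes the device. As a rough user-space model of the same ordering, using C11 fences where the kernel code uses smp_wmb()/wmb() (the ring layout and doorbell are placeholders, and a volatile store only approximates a real MMIO write):

  #include <stdatomic.h>
  #include <stdint.h>

  #define DEPTH 16			/* illustrative ring depth */

  struct ring {
  	uint64_t slot[DEPTH];
  	atomic_uint widx;		/* producer index, polled elsewhere */
  };

  static void ring_push(struct ring *r, uint64_t desc,
  		      volatile uint32_t *doorbell)
  {
  	unsigned int w = atomic_load_explicit(&r->widx,
  					      memory_order_relaxed);

  	r->slot[w & (DEPTH - 1)] = desc;	/* 1. write the payload */

  	/* 2. payload visible before the index moves (kernel: smp_wmb) */
  	atomic_store_explicit(&r->widx, w + 1, memory_order_release);

  	/* 3. everything visible before the doorbell (kernel: wmb) */
  	atomic_thread_fence(memory_order_seq_cst);
  	*doorbell = 1;
  }

The matching mb() added on the dequeue side plays the consumer role: all fields of a completed job are read before the outring_rmvd write lets the hardware reuse the slot.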
+ */ + wmb(); + wr_reg32(&jrp->rregs->inpring_jobadd, 1); spin_unlock_bh(&jrp->inplock); @@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev) goto out_free_irq; error = -ENOMEM; - jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, - &inpbusaddr, GFP_KERNEL); + jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * + JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); if (!jrp->inpring) goto out_free_irq; - jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * + jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * JOBR_DEPTH, &outbusaddr, GFP_KERNEL); if (!jrp->outring) goto out_free_inpring; - jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, - GFP_KERNEL); + jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); if (!jrp->entinfo) goto out_free_outring; @@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev) int error; jrdev = &pdev->dev; - jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), - GFP_KERNEL); + jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL); if (!jrpriv) return -ENOMEM; diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 672c97489..a8a799756 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -65,9 +65,31 @@ * */ +#ifdef CONFIG_ARM +/* These are common macros for Power, put here for ARM */ +#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr)) +#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr)) + +#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a) +#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a)) + +#define out_le32(a, v) out_arch(l, le32, a, v) +#define in_le32(a) in_arch(l, le32, a) + +#define out_be32(a, v) out_arch(l, be32, a, v) +#define in_be32(a) in_arch(l, be32, a) + +#define clrsetbits(type, addr, clear, set) \ + out_##type((addr), (in_##type(addr) & ~(clear)) | (set)) + +#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set) +#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) +#endif + #ifdef __BIG_ENDIAN #define wr_reg32(reg, data) out_be32(reg, data) #define rd_reg32(reg) in_be32(reg) +#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set) #ifdef CONFIG_64BIT #define wr_reg64(reg, data) out_be64(reg, data) #define rd_reg64(reg) in_be64(reg) @@ -76,6 +98,7 @@ #ifdef __LITTLE_ENDIAN #define wr_reg32(reg, data) __raw_writel(data, reg) #define rd_reg32(reg) __raw_readl(reg) +#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set) #ifdef CONFIG_64BIT #define wr_reg64(reg, data) __raw_writeq(data, reg) #define rd_reg64(reg) __raw_readq(reg) @@ -85,20 +108,31 @@ /* * The only users of these wr/rd_reg64 functions is the Job Ring (JR). - * The DMA address registers in the JR are a pair of 32-bit registers. - * The layout is: + * The DMA address registers in the JR are handled differently depending on + * platform: + * + * 1. All BE CAAM platforms and i.MX platforms (LE CAAM): * * base + 0x0000 : most-significant 32 bits * base + 0x0004 : least-significant 32 bits * * The 32-bit version of this core therefore has to write to base + 0x0004 - * to set the 32-bit wide DMA address. This seems to be independent of the - * endianness of the written/read data. + * to set the 32-bit wide DMA address. + * + * 2. All other LE CAAM platforms (LS1021A etc.) 
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
 */
 
 #ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
 #define REG64_MS32(reg) ((u32 __iomem *)(reg))
 #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
 
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
@@ -133,18 +167,28 @@ struct jr_outentry {
 #define CHA_NUM_MS_DECONUM_SHIFT	24
 #define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
 #define CHA_ID_LS_AES_SHIFT	0
-#define CHA_ID_LS_AES_MASK		(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)
 
 #define CHA_ID_LS_DES_SHIFT	4
-#define CHA_ID_LS_DES_MASK		(0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
 
 #define CHA_ID_LS_ARC4_SHIFT	8
 #define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
 
 #define CHA_ID_LS_MD_SHIFT	12
 #define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP		(0x2ull << CHA_ID_LS_MD_SHIFT)
 
 #define CHA_ID_LS_RNG_SHIFT	16
 #define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -395,10 +439,16 @@ struct caam_ctrl {
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT	12
 #define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF	(0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH	(0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL	(0x4 << MCFGR_ARCACHE_SHIFT)
 
 /* AXI write cache control */
 #define MCFGR_AWCACHE_SHIFT	8
 #define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF	(0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH	(0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL	(0x8 << MCFGR_AWCACHE_SHIFT)
 
 /* AXI pipeline depth */
 #define MCFGR_AXIPIPE_SHIFT	4
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b68b74cc7..18cd6d1f5 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 {
 	sec4_sg_ptr->ptr = dma;
 	sec4_sg_ptr->len = len;
-	sec4_sg_ptr->reserved = 0;
 	sec4_sg_ptr->buf_pool_id = 0;
 	sec4_sg_ptr->offset = offset;
 #ifdef DEBUG
@@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
 {
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
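On 32-bit builds, wr_reg64() above degenerates to two 32-bit stores, and REG64_MS32()/REG64_LS32() select which half lives at offset 0. A hosted-C model of that selection — wr_reg64_split and ms_first are illustrative; the real code picks the layout at compile time from the Kconfig symbols:

    #include <stdint.h>

    /* The 64-bit DMA address register is a pair of 32-bit registers; which
     * half sits at offset 0 depends on the platform (BE CAAM and i.MX:
     * most-significant first; other LE CAAM: least-significant first). */
    static void wr_reg64_split(volatile uint32_t *reg, uint64_t data,
                               int ms_first)
    {
        volatile uint32_t *ms = ms_first ? reg : reg + 1;
        volatile uint32_t *ls = ms_first ? reg + 1 : reg;

        *ms = (uint32_t)(data >> 32);
        *ls = (uint32_t)(data & 0xffffffffu);
    }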
+		 */
 		for (i = 0; i < nents; i++) {
-			dma_unmap_sg(dev, sg, 1, dir);
-			sg = sg_next(sg);
+			dma_unmap_sg(dev, tsg, 1, dir);
+			tsg = sg_next(tsg);
 		}
 	} else if (nents) {
 		dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
 		struct device *dev, struct scatterlist *sg, unsigned int nents,
 		enum dma_data_direction dir, bool chained)
 {
-	struct scatterlist *first = sg;
-
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
 		for (i = 0; i < nents; i++) {
-			if (!dma_map_sg(dev, sg, 1, dir)) {
-				dma_unmap_sg_chained(dev, first, i, dir,
+			if (!dma_map_sg(dev, tsg, 1, dir)) {
+				dma_unmap_sg_chained(dev, sg, i, dir,
 						     chained);
 				nents = 0;
 				break;
 			}
-			sg = sg_next(sg);
+			tsg = sg_next(tsg);
 		}
 	} else
 		nents = dma_map_sg(dev, sg, nents, dir);
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index f2e6de361..bb241c3ab 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
 	{ "AMDI0C00", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
@@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
 	{ .compatible = "amd,ccp-seattle-v1a" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ccp_of_match);
 #endif
 
 static struct platform_driver ccp_platform_driver = {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index ad47d0d61..68e8aa90f 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
 
 	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
 	if (!hdev->dma_lch) {
-		dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 		return -EBUSY;
 	}
 	dma_conf.direction = DMA_MEM_TO_DEV;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 402631a19..8f2790353 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -156,7 +156,8 @@ struct ablk_ctx {
 };
 
 struct aead_ctx {
-	struct buffer_desc *buffer;
+	struct buffer_desc *src;
+	struct buffer_desc *dst;
 	struct scatterlist ivlist;
 	/* used when the hmac is not on one sg entry */
 	u8 *hmac_virt;
@@ -198,6 +199,15 @@ struct ixp_alg {
 	int registered;
 };
 
+struct ixp_aead_alg {
+	struct aead_alg crypto;
+	const struct ix_hash_algo *hash;
+	u32 cfg_enc;
+	u32 cfg_dec;
+
+	int registered;
+};
+
 static const struct ix_hash_algo hash_alg_md5 = {
 	.cfgword	= 0xAA010004,
 	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int authsize = crypto_aead_authsize(tfm);
-	int decryptlen = req->cryptlen - authsize;
+	int decryptlen = req->assoclen + req->cryptlen - authsize;
 
 	if (req_ctx->encrypt) {
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
-			req->src, decryptlen, authsize, 1);
+			req->dst, decryptlen, authsize, 1);
 	}
 	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 }
@@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
 		struct aead_request *req = crypt->data.aead_req;
 		struct aead_ctx *req_ctx = aead_request_ctx(req);
 
-		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 		if (req_ctx->hmac_virt) {
 			finish_scattered_hmac(crypt);
 		}
@@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
 	return init_tfm(tfm);
 }
 
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
 {
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-			sizeof(struct aead_ctx));
-	return init_tfm(tfm);
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+	return init_tfm(crypto_aead_tfm(tfm));
 }
 
 static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
 	free_sa_dir(&ctx->decrypt);
 }
 
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+	exit_tfm(crypto_aead_tfm(tfm));
+}
+
 static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 		int init_len, u32 ctx_addr, const u8 *key, int key_len)
 {
@@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
 	return ret;
 }
 
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
-		unsigned int nbytes)
-{
-	int offset = 0;
-
-	if (!nbytes)
-		return 0;
-
-	for (;;) {
-		if (start < offset + sg->length)
-			break;
-
-		offset += sg->length;
-		sg = sg_next(sg);
-	}
-	return (start + nbytes > offset + sg->length);
-}
-
 static int aead_perform(struct aead_request *req, int encrypt,
 		int cryptoffset, int eff_cryptlen, u8 *iv)
 {
@@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
+	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+	unsigned int lastlen;
 
 	if (qmgr_stat_full(SEND_QID))
 		return -EAGAIN;
@@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	crypt->crypt_len = eff_cryptlen;
 
 	crypt->auth_offs = 0;
-	crypt->auth_len = req->assoclen + ivsize + cryptlen;
+	crypt->auth_len = req->assoclen + cryptlen;
 	BUG_ON(ivsize && !req->iv);
 	memcpy(crypt->iv, req->iv, ivsize);
 
+	req_ctx->dst = NULL;
+
 	if (req->src != req->dst) {
-		BUG(); /* -ENOTSUP because of my laziness */
+		struct buffer_desc dst_hook;
+
+		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+		src_direction = DMA_TO_DEVICE;
+
+		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+				      &dst_hook, flags, DMA_FROM_DEVICE);
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
+
+		if (!buf)
+			goto free_buf_dst;
+
+		if (encrypt) {
+			lastlen = buf->buf_len;
+			if (lastlen >= authsize)
+				crypt->icv_rev_aes = buf->phys_addr +
+						     buf->buf_len - authsize;
+		}
 	}
 
-	/* ASSOC data */
-	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
-		flags, DMA_TO_DEVICE);
-	req_ctx->buffer = src_hook.next;
+	buf = chainup_buffers(dev, req->src, crypt->auth_len,
+			      &src_hook, flags, src_direction);
+	req_ctx->src = src_hook.next;
 	crypt->src_buf = src_hook.phys_next;
 	if (!buf)
-		goto out;
-	/* IV */
-	sg_init_table(&req_ctx->ivlist, 1);
-	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
-	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_chain;
-	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+		goto free_buf_src;
+
+	if (!encrypt || !req_ctx->dst) {
+		lastlen = buf->buf_len;
+		if (lastlen >= authsize)
+			crypt->icv_rev_aes = buf->phys_addr +
+					     buf->buf_len - authsize;
+	}
+
+	if (unlikely(lastlen < authsize)) {
 		/* The 12 hmac bytes are scattered,
 		 * we need to copy them into a safe buffer */
 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
-			goto free_chain;
+			goto free_buf_src;
 		if (!encrypt) {
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
@@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	} else {
 		req_ctx->hmac_virt = NULL;
 	}
-	/* Crypt */
-	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_hmac_virt;
-	if (!req_ctx->hmac_virt) {
-		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
-	}
 
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
-free_hmac_virt:
-	if (req_ctx->hmac_virt) {
-		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
-				crypt->icv_rev_aes);
-	}
-free_chain:
-	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
@@ -1173,40 +1181,12 @@ badkey:
 
 static int aead_encrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 1, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
+	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 0, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned len, ivsize = crypto_aead_ivsize(tfm);
-	__be64 seq;
-
-	/* copied from eseqiv.c */
-	if (!ctx->salted) {
-		get_random_bytes(ctx->salt, ivsize);
-		ctx->salted = 1;
-	}
-	memcpy(req->areq.iv, ctx->salt, ivsize);
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-	return aead_perform(&req->areq, 1, req->areq.assoclen,
-			req->areq.cryptlen +ivsize, req->giv);
+	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
 }
 
 static struct ixp_alg ixp4xx_algos[] = {
@@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
 	},
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
 	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC |
 		   KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC |
 		   KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
 		.ivsize		= DES_BLOCK_SIZE,
 		.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
@@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void)
 		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
 			continue;
 		}
-		if (!ixp4xx_algos[i].hash) {
-			/* block ciphers */
-			cra->cra_type = &crypto_ablkcipher_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			if (!cra->cra_ablkcipher.setkey)
-				cra->cra_ablkcipher.setkey = ablk_setkey;
-			if (!cra->cra_ablkcipher.encrypt)
-				cra->cra_ablkcipher.encrypt = ablk_encrypt;
-			if (!cra->cra_ablkcipher.decrypt)
-				cra->cra_ablkcipher.decrypt = ablk_decrypt;
-			cra->cra_init = init_tfm_ablk;
-		} else {
-			/* authenc */
-			cra->cra_type = &crypto_aead_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			cra->cra_aead.setkey = aead_setkey;
-			cra->cra_aead.setauthsize = aead_setauthsize;
-			cra->cra_aead.encrypt = aead_encrypt;
-			cra->cra_aead.decrypt = aead_decrypt;
-			cra->cra_aead.givencrypt = aead_givencrypt;
-			cra->cra_init = init_tfm_aead;
-		}
+
+		/* block ciphers */
+		cra->cra_type = &crypto_ablkcipher_type;
+		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY |
+				 CRYPTO_ALG_ASYNC;
+		if (!cra->cra_ablkcipher.setkey)
+			cra->cra_ablkcipher.setkey = ablk_setkey;
+		if (!cra->cra_ablkcipher.encrypt)
+			cra->cra_ablkcipher.encrypt = ablk_encrypt;
+		if (!cra->cra_ablkcipher.decrypt)
+			cra->cra_ablkcipher.decrypt = ablk_decrypt;
+		cra->cra_init = init_tfm_ablk;
+
 		cra->cra_ctxsize = sizeof(struct ixp_ctx);
 		cra->cra_module = THIS_MODULE;
 		cra->cra_alignmask = 3;
@@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void)
 		else
 			ixp4xx_algos[i].registered = 1;
 	}
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
+		    CRYPTO_MAX_ALG_NAME)
+			continue;
+		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+			continue;
+
+		/* authenc */
+		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+				      CRYPTO_ALG_ASYNC;
+		cra->setkey = aead_setkey;
+		cra->setauthsize = aead_setauthsize;
+		cra->encrypt = aead_encrypt;
+		cra->decrypt = aead_decrypt;
+		cra->init = init_tfm_aead;
+		cra->exit = exit_tfm_aead;
+
+		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+		cra->base.cra_module = THIS_MODULE;
+		cra->base.cra_alignmask = 3;
+		cra->base.cra_priority = 300;
+
+		if (crypto_register_aead(cra))
+			printk(KERN_ERR "Failed to register '%s'\n",
+			       cra->base.cra_driver_name);
+		else
+			ixp4xx_aeads[i].registered = 1;
+	}
 	return 0;
 }
 
@@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void)
 	int num = ARRAY_SIZE(ixp4xx_algos);
 	int i;
 
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		if (ixp4xx_aeads[i].registered)
+			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+	}
+
 	for (i=0; i< num; i++) {
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 1c6f98dd8..0643e3366 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
 	.probe = mv_cesa_probe,
 	.remove = mv_cesa_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "marvell-cesa",
 		.of_match_table = mv_cesa_of_match_table,
 	},
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index e421c96c7..ad7552a69 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
 config CRYPTO_DEV_NX_COMPRESS
 	tristate "Compression acceleration support"
 	default y
+	select CRYPTO_ALGAPI
+	select 842_DECOMPRESS
 	help
 	  Support for PowerPC Nest (NX) compression acceleration. This
 	  module supports acceleration for compressing memory with the 842
-	  algorithm. One of the platform drivers must be selected also.
-	  If you choose 'M' here, this module will be called nx_compress.
+	  algorithm using the cryptographic API.  One of the platform
+	  drivers must be selected also.  If you choose 'M' here, this
+	  module will be called nx_compress.
 
 if CRYPTO_DEV_NX_COMPRESS
 
@@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
 	  algorithm.  This supports NX hardware on the PowerNV platform.
 	  If you choose 'M' here, this module will be called
 	  nx_compress_powernv.
 
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
-	tristate "Compression acceleration cryptographic interface"
-	select CRYPTO_ALGAPI
-	select 842_DECOMPRESS
-	default y
-	help
-	  Support for PowerPC Nest (NX) accelerators using the cryptographic
-	  API.  If you choose 'M' here, this module will be called
-	  nx_compress_crypto.
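For reference, the ixp4xx registration loop above is an instance of the new AEAD interface: an algorithm is described by struct aead_alg (ivsize/maxauthsize at top level, common fields under .base) and registered with crypto_register_aead() instead of filling cra_u.aead in a struct crypto_alg. A minimal sketch under that API; the example_* names, the stub callbacks, and the context type are invented for illustration:

    #include <crypto/internal/aead.h>
    #include <linux/module.h>

    struct example_ctx { u8 key[32]; };   /* hypothetical per-tfm context */

    static int example_setkey(struct crypto_aead *tfm, const u8 *key,
                              unsigned int keylen) { return 0; }
    static int example_encrypt(struct aead_request *req) { return 0; }
    static int example_decrypt(struct aead_request *req) { return 0; }

    static struct aead_alg example_aead = {
        .base = {
            .cra_name        = "authenc(hmac(sha1),cbc(aes))",
            .cra_driver_name = "authenc-example",
            .cra_priority    = 300,
            .cra_flags       = CRYPTO_ALG_ASYNC,
            .cra_blocksize   = 16,             /* AES_BLOCK_SIZE */
            .cra_ctxsize     = sizeof(struct example_ctx),
            .cra_module      = THIS_MODULE,
        },
        .ivsize      = 16,                     /* AES_BLOCK_SIZE */
        .maxauthsize = 20,                     /* SHA1_DIGEST_SIZE */
        .setkey      = example_setkey,
        .encrypt     = example_encrypt,
        .decrypt     = example_decrypt,
    };

    static int __init example_init(void)
    {
        return crypto_register_aead(&example_aead);
    }

Note that CRYPTO_ALG_TYPE_AEAD and cra_type no longer need to be set by hand; the AEAD registration path supplies them.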
-
 endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index e1684f5ad..b727821c8 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
 		   nx-sha256.o \
 		   nx-sha512.o
 
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
 nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
 nx-compress-pseries-objs := nx-842-pseries.o
 nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
deleted file mode 100644
index d53a1dcd7..000000000
--- a/drivers/crypto/nx/nx-842-crypto.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings
- *                   Seth Jennings
- *
- * Rewrite: Dan Streetman
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors.  Most of the complexity of this drvier is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths.  Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid.  Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library.  Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
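The header comment being removed here survives nearly verbatim in nx-842.c later in this patch; its key design point is the header-or-raw dispatch on decompression. In outline, assuming hosted C with ntohs() standing in for be16_to_cpu() and is_wrapped() as an invented helper name:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* ntohs */

    #define NX842_CRYPTO_MAGIC 0xf842

    /* A buffer that does not start with the driver's header magic is treated
     * as raw 842 and handed straight to the hardware; 0xf842 begins with the
     * five bits 0x1f, an invalid 842 template, so a raw 842 stream can never
     * alias the magic. */
    static int is_wrapped(const uint8_t *buf, size_t len)
    {
        uint16_t magic;

        if (len < sizeof(magic))
            return 0;
        memcpy(&magic, buf, sizeof(magic));
        return ntohs(magic) == NX842_CRYPTO_MAGIC;
    }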
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware.  Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer.  That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC		(0xf842)
-#define NX842_CRYPTO_GROUP_MAX		(0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g)				\
-	(sizeof(struct nx842_crypto_header) +			\
-	 sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE				\
-	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER	(2)
-#define BOUNCE_BUFFER_SIZE					\
-	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fallback to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT	(250) /* ms */
-#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
-
-struct nx842_crypto_header_group {
-	__be16 padding;			/* unused bytes at start of group */
-	__be32 compressed_length;	/* compressed bytes in group */
-	__be32 uncompressed_length;	/* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
-	__be16 magic;		/* NX842_CRYPTO_MAGIC */
-	__be16 ignore;		/* decompressed end bytes to ignore */
-	u8 groups;		/* total groups in this header */
-	struct nx842_crypto_header_group group[];
-} __packed;
-
-struct nx842_crypto_param {
-	u8 *in;
-	unsigned int iremain;
-	u8 *out;
-	unsigned int oremain;
-	unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
-			unsigned int slen, unsigned int dlen)
-{
-	if (p->iremain < slen)
-		return -EOVERFLOW;
-	if (p->oremain < dlen)
-		return -ENOSPC;
-
-	p->in += slen;
-	p->iremain -= slen;
-	p->out += dlen;
-	p->oremain -= dlen;
-	p->ototal += dlen;
-
-	return 0;
-}
-
-struct nx842_crypto_ctx {
-	u8 *wmem;
-	u8 *sbounce, *dbounce;
-
-	struct nx842_crypto_header header;
-	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
-	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
-		kfree(ctx->wmem);
-		free_page((unsigned long)ctx->sbounce);
-		free_page((unsigned long)ctx->dbounce);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	kfree(ctx->wmem);
-	free_page((unsigned long)ctx->sbounce);
-	free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
-	int ret;
-
-	ret = nx842_constraints(c);
-	if (ret) {
-		pr_err_ratelimited("could not get nx842 constraints : %d\n",
-				   ret);
-		return ret;
-	}
-
-	/* limit maximum, to always have enough bounce buffer to decompress */
-	if (c->maximum > BOUNCE_BUFFER_SIZE) {
-		c->maximum = BOUNCE_BUFFER_SIZE;
-		pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
-	}
-
-	return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
-	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
-	/* compress should have added space for header */
-	if (s > be16_to_cpu(hdr->group[0].padding)) {
-		pr_err("Internal error: no space for header\n");
-		return -EINVAL;
-	}
-
-	memcpy(buf, hdr, s);
-
-	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
-	return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
-		    struct nx842_crypto_param *p,
-		    struct nx842_crypto_header_group *g,
-		    struct nx842_constraints *c,
-		    u16 *ignore,
-		    unsigned int hdrsize)
-{
-	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	int ret, dskip = 0;
-	ktime_t timeout;
-
-	if (p->iremain == 0)
-		return -EOVERFLOW;
-
-	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
-		return -ENOSPC;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		adj_slen = slen = c->maximum;
-	if (adj_slen > slen || (u64)src % c->alignment) {
-		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
-		slen = min(slen, BOUNCE_BUFFER_SIZE);
-		if (adj_slen > slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		slen = adj_slen;
-		pr_debug("using comp sbounce buffer, len %x\n", slen);
-	}
-
-	dst += hdrsize;
-	dlen -= hdrsize;
-
-	if ((u64)dst % c->alignment) {
-		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
-		dst += dskip;
-		dlen -= dskip;
-	}
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < c->minimum) {
-nospc:
-		dst = ctx->dbounce;
-		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
-		dlen = round_down(dlen, c->multiple);
-		dskip = 0;
-		pr_debug("using comp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
-		/* possibly we should reduce the slen here, instead of
-		 * retrying with the dbounce buffer?
-		 */
-		if (ret == -ENOSPC && dst != ctx->dbounce)
-			goto nospc;
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret)
-		return ret;
-
-	dskip += hdrsize;
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out + dskip, dst, dlen);
-
-	g->padding = cpu_to_be16(dskip);
-	g->compressed_length = cpu_to_be32(dlen);
-	g->uncompressed_length = cpu_to_be32(slen);
-
-	if (p->iremain < slen) {
-		*ignore = slen - p->iremain;
-		slen = p->iremain;
-	}
-
-	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
-		 slen, *ignore, dlen, dskip);
-
-	return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
-				 const u8 *src, unsigned int slen,
-				 u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr = &ctx->header;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	unsigned int groups, hdrsize, h;
-	int ret, n;
-	bool add_header;
-	u16 ignore = 0;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	ret = read_constraints(&c);
-	if (ret)
-		return ret;
-
-	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
-		       DIV_ROUND_UP(p.iremain, c.maximum));
-	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
-	/* skip adding header if the buffers meet all constraints */
-	add_header = (p.iremain % c.multiple ||
-		      p.iremain < c.minimum ||
-		      p.iremain > c.maximum ||
-		      (u64)p.in % c.alignment ||
-		      p.oremain % c.multiple ||
-		      p.oremain < c.minimum ||
-		      p.oremain > c.maximum ||
-		      (u64)p.out % c.alignment);
-
-	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
-	hdr->groups = 0;
-	hdr->ignore = 0;
-
-	while (p.iremain > 0) {
-		n = hdr->groups++;
-		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
-			return -ENOSPC;
-
-		/* header goes before first group */
-		h = !n && add_header ? hdrsize : 0;
-
-		if (ignore)
-			pr_warn("interal error, ignore is set %x\n", ignore);
-
-		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
-		if (ret)
-			return ret;
-	}
-
-	if (!add_header && hdr->groups > 1) {
-		pr_err("Internal error: No header but multiple groups\n");
-		return -EINVAL;
-	}
-
-	/* ignore indicates the input stream needed to be padded */
-	hdr->ignore = cpu_to_be16(ignore);
-	if (ignore)
-		pr_debug("marked %d bytes as ignore\n", ignore);
-
-	if (add_header)
-		ret = nx842_crypto_add_header(hdr, dst);
-	if (ret)
-		return ret;
-
-	*dlen = p.ototal;
-
-	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
-		      struct nx842_crypto_param *p,
-		      struct nx842_crypto_header_group *g,
-		      struct nx842_constraints *c,
-		      u16 ignore,
-		      bool usehw)
-{
-	unsigned int slen = be32_to_cpu(g->compressed_length);
-	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
-	unsigned int dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	u16 padding = be16_to_cpu(g->padding);
-	int ret, spadding = 0, dpadding = 0;
-	ktime_t timeout;
-
-	if (!slen || !required_len)
-		return -EINVAL;
-
-	if (p->iremain <= 0 || padding + slen > p->iremain)
-		return -EOVERFLOW;
-
-	if (p->oremain <= 0 || required_len - ignore > p->oremain)
-		return -ENOSPC;
-
-	src += padding;
-
-	if (!usehw)
-		goto usesw;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		goto usesw;
-	if (slen < adj_slen || (u64)src % c->alignment) {
-		/* we can append padding bytes because the 842 format defines
-		 * an "end" template (see lib/842/842_decompress.c) and will
-		 * ignore any bytes following it.
-		 */
-		if (slen < adj_slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		spadding = adj_slen - slen;
-		slen = adj_slen;
-		pr_debug("using decomp sbounce buffer, len %x\n", slen);
-	}
-
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < required_len || (u64)dst % c->alignment) {
-		dst = ctx->dbounce;
-		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
-		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen < c->minimum)
-		goto usesw;
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret) {
-usesw:
-		/* reset everything, sw doesn't have constraints */
-		src = p->in + padding;
-		slen = be32_to_cpu(g->compressed_length);
-		spadding = 0;
-		dst = p->out;
-		dlen = p->oremain;
-		dpadding = 0;
-		if (dlen < required_len) { /* have ignore bytes */
-			dst = ctx->dbounce;
-			dlen = BOUNCE_BUFFER_SIZE;
-		}
-		pr_info_ratelimited("using software 842 decompression\n");
-		ret = sw842_decompress(src, slen, dst, &dlen);
-	}
-	if (ret)
-		return ret;
-
-	slen -= spadding;
-
-	dlen -= ignore;
-	if (ignore)
-		pr_debug("ignoring last %x bytes\n", ignore);
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out, dst, dlen);
-
-	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
-		 slen, padding, dlen, ignore);
-
-	return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
-				   const u8 *src, unsigned int slen,
-				   u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	int n, ret, hdr_len;
-	u16 ignore = 0;
-	bool usehw = true;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	if (read_constraints(&c))
-		usehw = false;
-
-	hdr = (struct nx842_crypto_header *)src;
-
-	/* If it doesn't start with our header magic number, assume it's a raw
-	 * 842 compressed buffer and pass it directly to the hardware driver
-	 */
-	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
-		struct nx842_crypto_header_group g = {
-			.padding =		0,
-			.compressed_length =	cpu_to_be32(p.iremain),
-			.uncompressed_length =	cpu_to_be32(p.oremain),
-		};
-
-		ret = decompress(ctx, &p, &g, &c, 0, usehw);
-		if (ret)
-			return ret;
-
-		*dlen = p.ototal;
-
-		return 0;
-	}
-
-	if (!hdr->groups) {
-		pr_err("header has no groups\n");
-		return -EINVAL;
-	}
-	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
-		pr_err("header has too many groups %x, max %x\n",
-		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
-		return -EINVAL;
-	}
-
-	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-	if (hdr_len > slen)
-		return -EOVERFLOW;
-
-	memcpy(&ctx->header, src, hdr_len);
-	hdr = &ctx->header;
-
-	for (n = 0; n < hdr->groups; n++) {
-		/* ignore applies to last group */
-		if (n + 1 == hdr->groups)
-			ignore = be16_to_cpu(hdr->ignore);
-
-		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
-		if (ret)
-			return ret;
-	}
-
-	*dlen = p.ototal;
-
-	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static struct crypto_alg alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-nx",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= nx842_crypto_init,
-	.cra_exit		= nx842_crypto_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= nx842_crypto_compress,
-	.coa_decompress		= nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
-	return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
-	crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman ");
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c
deleted file mode 100644
index 664f13dd0..000000000
--- a/drivers/crypto/nx/nx-842-platform.c
+++ /dev/null
@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load.  So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
-	return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (!driver) {
-		driver = _driver;
-		ret = true;
-	} else
-		WARN(1, "can't set platform driver, already set to %s\n",
-		     driver->name);
-
-	spin_unlock(&driver_lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
-	spin_lock(&driver_lock);
-
-	if (driver == _driver)
-		driver = NULL;
-	else if (driver)
-		WARN(1, "can't unset platform driver %s, currently set to %s\n",
-		     _driver->name, driver->name);
-	else
-		WARN(1, "can't unset platform driver, already unset\n");
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
-
-bool nx842_platform_driver_get(void)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (driver)
-		ret = try_module_get(driver->owner);
-
-	spin_unlock(&driver_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
-	spin_lock(&driver_lock);
-
-	if (driver)
-		module_put(driver->owner);
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman ");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 33b3b0abf..3750e13d8 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman ");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 #define WORKMEM_ALIGN	(CRB_ALIGN)
 #define CSB_WAIT_MAX	(5000) /* ms */
@@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
 	}
 	/* successful completion */
-	pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+			     be32_to_cpu(csb->count),
 			     (unsigned long)ktime_us_delta(now, start));
 
 	return 0;
@@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
 	.decompress =	nx842_powernv_decompress,
 };
 
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_powernv_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
 static __init int nx842_powernv_init(void)
 {
 	struct device_node *dn;
+	int ret;
 
 	/* verify workmem size/align restrictions */
 	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
 	BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
 	BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
 
-	pr_info("loading\n");
-
 	for_each_compatible_node(dn, NULL, "ibm,power-nx")
 		nx842_powernv_probe(dn);
 
-	if (!nx842_ct) {
-		pr_err("no coprocessors found\n");
+	if (!nx842_ct)
 		return -ENODEV;
-	}
 
-	if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+	ret = crypto_register_alg(&nx842_powernv_alg);
+	if (ret) {
 		struct nx842_coproc *coproc, *n;
 
 		list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
 			kfree(coproc);
 		}
 
-		return -EEXIST;
+		return ret;
 	}
 
-	pr_info("loaded\n");
-
 	return 0;
 }
 module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
 {
 	struct nx842_coproc *coproc, *n;
 
-	nx842_platform_driver_unset(&nx842_powernv_driver);
+	crypto_unregister_alg(&nx842_powernv_alg);
 
 	list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
 		list_del(&coproc->list);
 		kfree(coproc);
 	}
-
-	pr_info("unloaded\n");
 }
 module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 3040a6091..f4cbde03c 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -29,6 +29,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Jennings ");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 static struct nx842_constraints nx842_pseries_constraints = {
 	.alignment =	DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@ struct nx842_workmem {
 #define NX842_HW_PAGE_SIZE	(4096)
 #define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
 
-enum nx842_status {
-	UNAVAILABLE,
-	AVAILABLE
-};
-
 struct ibm_nx842_counters {
 	atomic64_t comp_complete;
 	atomic64_t comp_failed;
@@ -121,7 +118,6 @@ static struct nx842_devdata {
 	unsigned int max_sg_len;
 	unsigned int max_sync_size;
 	unsigned int max_sync_sg;
-	enum nx842_status status;
 } __rcu *devdata;
 static DEFINE_SPINLOCK(devdata_mutex);
 
@@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
 	switch (csb->completion_code) {
 	case 0:	/* Completed without error */
 		break;
-	case 64: /* Target bytes > Source bytes during compression */
+	case 64: /* Compression ok, but output larger than input */
+		dev_dbg(dev, "%s: output size larger than input size\n",
+			__func__);
+
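The powernv hunk above shows the pattern that replaces nx-842-platform.c: each platform module registers its own "842"/"842-nx" crypto_alg, whose cra_init binds the shared nx842_crypto_* helpers to that platform's ops table. Schematically, with my_platform_* as placeholder names for a hypothetical third platform:

    /* The shared helpers are parameterized by a struct nx842_driver; each
     * platform module supplies a thin cra_init passing its own ops. */
    static int my_platform_crypto_init(struct crypto_tfm *tfm)
    {
        return nx842_crypto_init(tfm, &my_platform_driver);
    }

    static struct crypto_alg my_platform_alg = {
        .cra_name        = "842",
        .cra_driver_name = "842-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize     = sizeof(struct nx842_crypto_ctx),
        .cra_module      = THIS_MODULE,
        .cra_init        = my_platform_crypto_init,
        .cra_exit        = nx842_crypto_exit,
        .cra_u           = { .compress = {
            .coa_compress   = nx842_crypto_compress,
            .coa_decompress = nx842_crypto_decompress } }
    };

This removes the global platform-driver pointer and its locking entirely: module dependency is now expressed through ordinary symbol references and MODULE_ALIAS_CRYPTO lines.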
 		break;
 	case 13: /* Output buffer too small */
-		dev_dbg(dev, "%s: Compression output larger than input\n",
+		dev_dbg(dev, "%s: Out of space in output buffer\n",
 			__func__);
 		return -ENOSPC;
 	case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
 		devdata->max_sync_size = 0;
 		devdata->max_sync_sg = 0;
 		devdata->max_sg_len = 0;
-		devdata->status = UNAVAILABLE;
 		return 0;
 	} else
 		return -ENOENT;
 }
 
 /**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The status field indicates if the device is enabled when the status
 * is 'okay'.  Otherwise the device driver will be disabled.
 *
- * @devdata - struct nx842_devdata to update
 * @prop - struct property point containing the maxsyncop for the update
 *
 * Returns:
 *  0 - Device is available
- *  -EINVAL - Device is not available
+ *  -ENODEV - Device is not available
 */
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
-			       struct property *prop) {
-	int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
 	const char *status = (const char *)prop->value;
 
-	if (!strncmp(status, "okay", (size_t)prop->length)) {
-		devdata->status = AVAILABLE;
-	} else {
-		dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
-				__func__, status);
-		devdata->status = UNAVAILABLE;
-	}
+	if (!strncmp(status, "okay", (size_t)prop->length))
+		return 0;
+	if (!strncmp(status, "disabled", (size_t)prop->length))
+		return -ENODEV;
+	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
 
-	return ret;
+	return -EINVAL;
 }
 
 /**
@@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
 	int ret = 0;
 	unsigned long flags;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop)
 	if (!old_devdata || !of_node) {
 		pr_err("%s: device is not available\n", __func__);
 		spin_unlock_irqrestore(&devdata_mutex, flags);
+		kfree(new_devdata);
 		return -ENODEV;
 	}
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_out;
-	}
-
 	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
 	new_devdata->counters = old_devdata->counters;
 
@@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop)
 		goto out;
 
 	/* Perform property updates */
-	ret = nx842_OF_upd_status(new_devdata, status);
+	ret = nx842_OF_upd_status(status);
 	if (ret)
 		goto error_out;
 
@@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = {
 	.decompress =	nx842_pseries_decompress,
 };
 
-static int __init nx842_probe(struct vio_dev *viodev,
-		  const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_pseries_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+		       const struct vio_device_id *id)
 {
 	struct nx842_devdata *old_devdata, *new_devdata = NULL;
 	unsigned long flags;
 	int ret = 0;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+					GFP_NOFS);
+	if (!new_devdata->counters) {
+		kfree(new_devdata);
+		return -ENOMEM;
+	}
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@ static int nx842_probe(struct vio_dev *viodev,
 
 	dev_set_drvdata(&viodev->dev, NULL);
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
-	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
-			GFP_NOFS);
-	if (!new_devdata->counters) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
 	new_devdata->vdev = viodev;
 	new_devdata->dev = &viodev->dev;
 	nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@ static int nx842_probe(struct vio_dev *viodev,
 	of_reconfig_notifier_register(&nx842_of_nb);
 
 	ret = nx842_OF_upd(NULL);
-	if (ret && ret != -ENODEV) {
-		dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
-		ret = -1;
+	if (ret)
+		goto error;
+
+	ret = crypto_register_alg(&nx842_pseries_alg);
+	if (ret) {
+		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
 		goto error;
 	}
 
@@ -1043,7 +1053,7 @@ error:
 	return ret;
 }
 
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
@@ -1051,6 +1061,8 @@ static int nx842_remove(struct vio_dev *viodev)
 	pr_info("Removing IBM Power 842 compression device\n");
 	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
 
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
 static struct vio_driver nx842_vio_driver = {
 	.name = KBUILD_MODNAME,
 	.probe = nx842_probe,
-	.remove = __exit_p(nx842_remove),
+	.remove = nx842_remove,
 	.get_desired_dma = nx842_get_desired_dma,
 	.id_table = nx842_vio_driver_ids,
 };
 
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
 {
 	struct nx842_devdata *new_devdata;
 	int ret;
 
-	pr_info("Registering IBM Power 842 compression driver\n");
-
 	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
 		return -ENODEV;
 
@@ -1095,7 +1105,6 @@ static int __init nx842_pseries_init(void)
 		pr_err("Could not allocate memory for device data\n");
 		return -ENOMEM;
 	}
-	new_devdata->status = UNAVAILABLE;
 	RCU_INIT_POINTER(devdata, new_devdata);
 
 	ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@ static int __init nx842_pseries_init(void)
 		return ret;
 	}
 
-	if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
-		vio_unregister_driver(&nx842_vio_driver);
-		kfree(new_devdata);
-		return -EEXIST;
-	}
-
 	return 0;
 }
 
-module_init(nx842_init);
+module_init(nx842_pseries_init);
 
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
 
-	pr_info("Exiting IBM Power 842 compression driver\n");
-	nx842_platform_driver_unset(&nx842_pseries_driver);
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@ static void __exit nx842_pseries_exit(void)
 	vio_unregister_driver(&nx842_vio_driver);
 }
 
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6e5e0d60d..046c1c454 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1,10 +1,5 @@
 /*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- *   Bulent Abali
+ * Cryptographic API for the NX-842 hardware compression.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -15,89 +10,522 @@
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ *   Bulent Abali
+ *
+ * Original Authors: Robert Jennings
+ *                   Seth Jennings
+ *
+ * Rewrite: Dan Streetman
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors.  Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths.  Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid.  Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library.  Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware.  Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include "nx-842.h"
+#include
+#include
+#include
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman ");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+#include "nx-842.h"
 
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints.  Different nx842 implementations
- * may have varying requirements.  The constraints are:
- *   @alignment:	All buffers should be aligned to this
- *   @multiple:		All buffer lengths should be a multiple of this
- *   @minimum:		Buffer lengths must not be less than this amount
- *   @maximum:		Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint.  It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer.  That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
 */
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC		(0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g)				\
+	(sizeof(struct nx842_crypto_header) +			\
+	 sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE				\
+	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER	(2)
+#define BOUNCE_BUFFER_SIZE					\
+	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fallback to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT	(250) /* ms */
+#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
+
+struct nx842_crypto_param {
+	u8 *in;
+	unsigned int iremain;
+	u8 *out;
+	unsigned int oremain;
+	unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+			unsigned int slen, unsigned int dlen)
 {
-	memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+	if (p->iremain < slen)
+		return -EOVERFLOW;
+	if (p->oremain < dlen)
+		return -ENOSPC;
+
+	p->in += slen;
+	p->iremain -= slen;
+	p->out += dlen;
+	p->oremain -= dlen;
+	p->ototal += dlen;
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_constraints);
 
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */ -size_t nx842_workmem_size(void) +int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) { - return nx842_platform_driver()->workmem_size; + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + spin_lock_init(&ctx->lock); + ctx->driver = driver; + ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL); + ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); + return -ENOMEM; + } + + return 0; } -EXPORT_SYMBOL_GPL(nx842_workmem_size); +EXPORT_SYMBOL_GPL(nx842_crypto_init); -int nx842_compress(const unsigned char *in, unsigned int ilen, - unsigned char *out, unsigned int *olen, void *wmem) +void nx842_crypto_exit(struct crypto_tfm *tfm) { - return nx842_platform_driver()->compress(in, ilen, out, olen, wmem); + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); } -EXPORT_SYMBOL_GPL(nx842_compress); +EXPORT_SYMBOL_GPL(nx842_crypto_exit); -int nx842_decompress(const unsigned char *in, unsigned int ilen, - unsigned char *out, unsigned int *olen, void *wmem) +static void check_constraints(struct nx842_constraints *c) { - return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem); + /* limit maximum, to always have enough bounce buffer to decompress */ + if (c->maximum > BOUNCE_BUFFER_SIZE) + c->maximum = BOUNCE_BUFFER_SIZE; } -EXPORT_SYMBOL_GPL(nx842_decompress); -static __init int nx842_init(void) +static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) { - request_module("nx-compress-powernv"); - request_module("nx-compress-pseries"); + int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); - /* we prevent loading if there's no platform driver, and we get the - * module that set it so it won't unload, so we don't need to check - * if it's set in any of the above functions - */ - if (!nx842_platform_driver_get()) { - pr_err("no nx842 driver found.\n"); - return -ENODEV; + /* compress should have added space for header */ + if (s > be16_to_cpu(hdr->group[0].padding)) { + pr_err("Internal error: no space for header\n"); + return -EINVAL; } + memcpy(buf, hdr, s); + + print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); + return 0; } -module_init(nx842_init); -static void __exit nx842_exit(void) +static int compress(struct nx842_crypto_ctx *ctx, + struct nx842_crypto_param *p, + struct nx842_crypto_header_group *g, + struct nx842_constraints *c, + u16 *ignore, + unsigned int hdrsize) +{ + unsigned int slen = p->iremain, dlen = p->oremain, tmplen; + unsigned int adj_slen = slen; + u8 *src = p->in, *dst = p->out; + int ret, dskip = 0; + ktime_t timeout; + + if (p->iremain == 0) + return -EOVERFLOW; + + if (p->oremain == 0 || hdrsize + c->minimum > dlen) + return -ENOSPC; + + if (slen % c->multiple) + adj_slen = round_up(slen, c->multiple); + if (slen < c->minimum) + adj_slen = c->minimum; + if (slen > c->maximum) + adj_slen = slen = c->maximum; + if (adj_slen > slen || (u64)src % c->alignment) { + adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); + slen = min(slen, BOUNCE_BUFFER_SIZE); + if (adj_slen > slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + slen = adj_slen; + pr_debug("using comp sbounce buffer, len %x\n", 
slen); + } + + dst += hdrsize; + dlen -= hdrsize; + + if ((u64)dst % c->alignment) { + dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); + dst += dskip; + dlen -= dskip; + } + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < c->minimum) { +nospc: + dst = ctx->dbounce; + dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); + dlen = round_down(dlen, c->multiple); + dskip = 0; + pr_debug("using comp dbounce buffer, len %x\n", dlen); + } + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem); + /* possibly we should reduce the slen here, instead of + * retrying with the dbounce buffer? + */ + if (ret == -ENOSPC && dst != ctx->dbounce) + goto nospc; + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) + return ret; + + dskip += hdrsize; + + if (dst == ctx->dbounce) + memcpy(p->out + dskip, dst, dlen); + + g->padding = cpu_to_be16(dskip); + g->compressed_length = cpu_to_be32(dlen); + g->uncompressed_length = cpu_to_be32(slen); + + if (p->iremain < slen) { + *ignore = slen - p->iremain; + slen = p->iremain; + } + + pr_debug("compress slen %x ignore %x dlen %x padding %x\n", + slen, *ignore, dlen, dskip); + + return update_param(p, slen, dskip + dlen); +} + +int nx842_crypto_compress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr = &ctx->header; + struct nx842_crypto_param p; + struct nx842_constraints c = *ctx->driver->constraints; + unsigned int groups, hdrsize, h; + int ret, n; + bool add_header; + u16 ignore = 0; + + check_constraints(&c); + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, + DIV_ROUND_UP(p.iremain, c.maximum)); + hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); + + spin_lock_bh(&ctx->lock); + + /* skip adding header if the buffers meet all constraints */ + add_header = (p.iremain % c.multiple || + p.iremain < c.minimum || + p.iremain > c.maximum || + (u64)p.in % c.alignment || + p.oremain % c.multiple || + p.oremain < c.minimum || + p.oremain > c.maximum || + (u64)p.out % c.alignment); + + hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); + hdr->groups = 0; + hdr->ignore = 0; + + while (p.iremain > 0) { + n = hdr->groups++; + ret = -ENOSPC; + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) + goto unlock; + + /* header goes before first group */ + h = !n && add_header ? 
hdrsize : 0; + + if (ignore) + pr_warn("internal error, ignore is set %x\n", ignore); + + ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); + if (ret) + goto unlock; + } + + if (!add_header && hdr->groups > 1) { + pr_err("Internal error: No header but multiple groups\n"); + ret = -EINVAL; + goto unlock; + } + + /* ignore indicates the input stream needed to be padded */ + hdr->ignore = cpu_to_be16(ignore); + if (ignore) + pr_debug("marked %d bytes as ignore\n", ignore); + + if (add_header) + ret = nx842_crypto_add_header(hdr, dst); + if (ret) + goto unlock; + + *dlen = p.ototal; + + pr_debug("compress total slen %x dlen %x\n", slen, *dlen); + +unlock: + spin_unlock_bh(&ctx->lock); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_crypto_compress); + +static int decompress(struct nx842_crypto_ctx *ctx, + struct nx842_crypto_param *p, + struct nx842_crypto_header_group *g, + struct nx842_constraints *c, + u16 ignore) { - nx842_platform_driver_put(); + unsigned int slen = be32_to_cpu(g->compressed_length); + unsigned int required_len = be32_to_cpu(g->uncompressed_length); + unsigned int dlen = p->oremain, tmplen; + unsigned int adj_slen = slen; + u8 *src = p->in, *dst = p->out; + u16 padding = be16_to_cpu(g->padding); + int ret, spadding = 0, dpadding = 0; + ktime_t timeout; + + if (!slen || !required_len) + return -EINVAL; + + if (p->iremain <= 0 || padding + slen > p->iremain) + return -EOVERFLOW; + + if (p->oremain <= 0 || required_len - ignore > p->oremain) + return -ENOSPC; + + src += padding; + + if (slen % c->multiple) + adj_slen = round_up(slen, c->multiple); + if (slen < c->minimum) + adj_slen = c->minimum; + if (slen > c->maximum) + goto usesw; + if (slen < adj_slen || (u64)src % c->alignment) { + /* we can append padding bytes because the 842 format defines + * an "end" template (see lib/842/842_decompress.c) and will + * ignore any bytes following it.
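+ *
+ * For example, with purely illustrative numbers: given a multiple
+ * constraint of 8 and a 13-byte compressed group, adj_slen is rounded
+ * up to 16 and the 3 extra sbounce bytes are zeroed; the decompressor
+ * stops at the "end" template within the real 13 bytes and never
+ * interprets the appended zeros.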
+ */ + if (slen < adj_slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + spadding = adj_slen - slen; + slen = adj_slen; + pr_debug("using decomp sbounce buffer, len %x\n", slen); + } + + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < required_len || (u64)dst % c->alignment) { + dst = ctx->dbounce; + dlen = min(required_len, BOUNCE_BUFFER_SIZE); + pr_debug("using decomp dbounce buffer, len %x\n", dlen); + } + if (dlen < c->minimum) + goto usesw; + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem); + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) { +usesw: + /* reset everything, sw doesn't have constraints */ + src = p->in + padding; + slen = be32_to_cpu(g->compressed_length); + spadding = 0; + dst = p->out; + dlen = p->oremain; + dpadding = 0; + if (dlen < required_len) { /* have ignore bytes */ + dst = ctx->dbounce; + dlen = BOUNCE_BUFFER_SIZE; + } + pr_info_ratelimited("using software 842 decompression\n"); + ret = sw842_decompress(src, slen, dst, &dlen); + } + if (ret) + return ret; + + slen -= spadding; + + dlen -= ignore; + if (ignore) + pr_debug("ignoring last %x bytes\n", ignore); + + if (dst == ctx->dbounce) + memcpy(p->out, dst, dlen); + + pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", + slen, padding, dlen, ignore); + + return update_param(p, slen + padding, dlen); } -module_exit(nx842_exit); + +int nx842_crypto_decompress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr; + struct nx842_crypto_param p; + struct nx842_constraints c = *ctx->driver->constraints; + int n, ret, hdr_len; + u16 ignore = 0; + + check_constraints(&c); + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + hdr = (struct nx842_crypto_header *)src; + + spin_lock_bh(&ctx->lock); + + /* If it doesn't start with our header magic number, assume it's a raw + * 842 compressed buffer and pass it directly to the hardware driver + */ + if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { + struct nx842_crypto_header_group g = { + .padding = 0, + .compressed_length = cpu_to_be32(p.iremain), + .uncompressed_length = cpu_to_be32(p.oremain), + }; + + ret = decompress(ctx, &p, &g, &c, 0); + if (ret) + goto unlock; + + goto success; + } + + if (!hdr->groups) { + pr_err("header has no groups\n"); + ret = -EINVAL; + goto unlock; + } + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { + pr_err("header has too many groups %x, max %x\n", + hdr->groups, NX842_CRYPTO_GROUP_MAX); + ret = -EINVAL; + goto unlock; + } + + hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); + if (hdr_len > slen) { + ret = -EOVERFLOW; + goto unlock; + } + + memcpy(&ctx->header, src, hdr_len); + hdr = &ctx->header; + + for (n = 0; n < hdr->groups; n++) { + /* ignore applies to last group */ + if (n + 1 == hdr->groups) + ignore = be16_to_cpu(hdr->ignore); + + ret = decompress(ctx, &p, &hdr->group[n], &c, ignore); + if (ret) + goto unlock; + } + +success: + *dlen = p.ototal; + + pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); + + ret = 0; + +unlock: + spin_unlock_bh(&ctx->lock); + + return ret; +} 
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver"); +MODULE_AUTHOR("Dan Streetman "); diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index ac0ea79d0..a4eee3bba 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -3,8 +3,9 @@ #define __NX_842_H__ #include <linux/kernel.h> +#include <linux/init.h> #include <linux/module.h> -#include <linux/nx842.h> +#include <linux/crypto.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/io.h> @@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr) #define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) +/** + * This provides the driver's constraints. Different nx842 implementations + * may have varying requirements. The constraints are: + * @alignment: All buffers should be aligned to this + * @multiple: All buffer lengths should be a multiple of this + * @minimum: Buffer lengths must not be less than this amount + * @maximum: Buffer lengths must not be more than this amount + * + * The constraints apply to all buffers and lengths, both input and output, + * for both compression and decompression, except for the minimum which + * only applies to compression input and decompression output; the + * compressed data can be less than the minimum constraint. It can be + * assumed that compressed data will always adhere to the multiple + * constraint. + * + * The driver may succeed even if these constraints are violated; + * however the driver can return failure or suffer reduced performance + * if any constraint is not met. + */ struct nx842_constraints { int alignment; int multiple; @@ -126,19 +146,40 @@ struct nx842_driver { void *wrkmem); }; -struct nx842_driver *nx842_platform_driver(void); -bool nx842_platform_driver_set(struct nx842_driver *driver); -void nx842_platform_driver_unset(struct nx842_driver *driver); -bool nx842_platform_driver_get(void); -void nx842_platform_driver_put(void); +struct nx842_crypto_header_group { + __be16 padding; /* unused bytes at start of group */ + __be32 compressed_length; /* compressed bytes in group */ + __be32 uncompressed_length; /* bytes after decompression */ +} __packed; + +struct nx842_crypto_header { + __be16 magic; /* NX842_CRYPTO_MAGIC */ + __be16 ignore; /* decompressed end bytes to ignore */ + u8 groups; /* total groups in this header */ + struct nx842_crypto_header_group group[]; +} __packed; -size_t nx842_workmem_size(void); +#define NX842_CRYPTO_GROUP_MAX (0x20) -int nx842_constraints(struct nx842_constraints *constraints); +struct nx842_crypto_ctx { + spinlock_t lock; + + u8 *wmem; + u8 *sbounce, *dbounce; + + struct nx842_crypto_header header; + struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; + + struct nx842_driver *driver; +}; -int nx842_compress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); -int nx842_decompress(const unsigned char *in, unsigned int in_len, - unsigned char *out, unsigned int *out_len, void *wrkmem); +int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver); +void nx842_crypto_exit(struct crypto_tfm *tfm); +int nx842_crypto_compress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +int nx842_crypto_decompress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); #endif /* __NX_842_H__ */ diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index e4311ce0c..73ef49922
100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, return -EINVAL; } - crypto_aead_crt(tfm)->authsize = authsize; - return 0; } @@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, return -EINVAL; } - crypto_aead_crt(tfm)->authsize = authsize; - return 0; } @@ -174,6 +170,7 @@ static int generate_pat(u8 *iv, struct nx_crypto_ctx *nx_ctx, unsigned int authsize, unsigned int nbytes, + unsigned int assoclen, u8 *out) { struct nx_sg *nx_insg = nx_ctx->in_sg; @@ -200,16 +197,16 @@ static int generate_pat(u8 *iv, * greater than 2^32. */ - if (!req->assoclen) { + if (!assoclen) { b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; - } else if (req->assoclen <= 14) { + } else if (assoclen <= 14) { /* if associated data is 14 bytes or less, we do 1 GCM * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, * which is fed in through the source buffers here */ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; b1 = nx_ctx->priv.ccm.iauth_tag; - iauth_len = req->assoclen; - } else if (req->assoclen <= 65280) { + iauth_len = assoclen; + } else if (assoclen <= 65280) { /* if associated data is less than (2^16 - 2^8), we construct * B1 differently and feed in the associated data to a CCA * operation */ @@ -223,7 +220,7 @@ static int generate_pat(u8 *iv, } /* generate B0 */ - rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); + rc = generate_b0(iv, assoclen, authsize, nbytes, b0); if (rc) return rc; @@ -233,22 +230,22 @@ static int generate_pat(u8 *iv, */ if (b1) { memset(b1, 0, 16); - if (req->assoclen <= 65280) { - *(u16 *)b1 = (u16)req->assoclen; - scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, + if (assoclen <= 65280) { + *(u16 *)b1 = assoclen; + scatterwalk_map_and_copy(b1 + 2, req->src, 0, iauth_len, SCATTERWALK_FROM_SG); } else { *(u16 *)b1 = (u16)(0xfffe); - *(u32 *)&b1[2] = (u32)req->assoclen; - scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, + *(u32 *)&b1[2] = assoclen; + scatterwalk_map_and_copy(b1 + 6, req->src, 0, iauth_len, SCATTERWALK_FROM_SG); } } /* now copy any remaining AAD to scatterlist and call nx... 
*/ - if (!req->assoclen) { + if (!assoclen) { return rc; - } else if (req->assoclen <= 14) { + } else if (assoclen <= 14) { unsigned int len = 16; nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); @@ -280,7 +277,7 @@ static int generate_pat(u8 *iv, return rc; atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); } else { unsigned int processed = 0, to_process; @@ -294,15 +291,15 @@ static int generate_pat(u8 *iv, nx_ctx->ap->databytelen/NX_PAGE_SIZE); do { - to_process = min_t(u32, req->assoclen - processed, + to_process = min_t(u32, assoclen - processed, nx_ctx->ap->databytelen); nx_insg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, - req->assoc, processed, + req->src, processed, &to_process); - if ((to_process + processed) < req->assoclen) { + if ((to_process + processed) < assoclen) { NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_INTERMEDIATE; } else { @@ -328,11 +325,10 @@ static int generate_pat(u8 *iv, NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, - &(nx_ctx->stats->aes_bytes)); + atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); processed += to_process; - } while (processed < req->assoclen); + } while (processed < assoclen); result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; } @@ -343,7 +339,8 @@ static int generate_pat(u8 *iv, } static int ccm_nx_decrypt(struct aead_request *req, - struct blkcipher_desc *desc) + struct blkcipher_desc *desc, + unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; @@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request *req, /* copy out the auth tag to compare with later */ scatterwalk_map_and_copy(priv->oauth_tag, - req->src, nbytes, authsize, + req->src, nbytes + req->assoclen, authsize, SCATTERWALK_FROM_SG); - rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); if (rc) goto out; @@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request *req, NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, - &to_process, processed, - csbcpb->cpb.aes_ccm.iv_or_ctr); + &to_process, processed + req->assoclen, + csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; @@ -420,7 +417,8 @@ out: } static int ccm_nx_encrypt(struct aead_request *req, - struct blkcipher_desc *desc) + struct blkcipher_desc *desc, + unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; @@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req, spin_lock_irqsave(&nx_ctx->lock, irq_flags); - rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, + rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); if (rc) goto out; @@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request *req, NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, - &to_process, processed, + &to_process, processed + req->assoclen, csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; @@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request *req, /* copy out the auth tag */ scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, - req->dst, nbytes, authsize, 
+ req->dst, nbytes + req->assoclen, authsize, SCATTERWALK_TO_SG); out: @@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req) memcpy(iv + 4, req->iv, 8); desc.info = iv; - desc.tfm = (struct crypto_blkcipher *)req->base.tfm; - return ccm_nx_encrypt(req, &desc); + return ccm_nx_encrypt(req, &desc, req->assoclen - 8); } static int ccm_aes_nx_encrypt(struct aead_request *req) @@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req) int rc; desc.info = req->iv; - desc.tfm = (struct crypto_blkcipher *)req->base.tfm; rc = crypto_ccm_check_iv(desc.info); if (rc) return rc; - return ccm_nx_encrypt(req, &desc); + return ccm_nx_encrypt(req, &desc, req->assoclen); } static int ccm4309_aes_nx_decrypt(struct aead_request *req) @@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req) memcpy(iv + 4, req->iv, 8); desc.info = iv; - desc.tfm = (struct crypto_blkcipher *)req->base.tfm; - return ccm_nx_decrypt(req, &desc); + return ccm_nx_decrypt(req, &desc, req->assoclen - 8); } static int ccm_aes_nx_decrypt(struct aead_request *req) @@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req) int rc; desc.info = req->iv; - desc.tfm = (struct crypto_blkcipher *)req->base.tfm; rc = crypto_ccm_check_iv(desc.info); if (rc) return rc; - return ccm_nx_decrypt(req, &desc); + return ccm_nx_decrypt(req, &desc, req->assoclen); } /* tell the block cipher walk routines that this is a stream cipher by @@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req) * during encrypt/decrypt doesn't solve this problem, because it calls * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, * but instead uses this tfm->blocksize. */ -struct crypto_alg nx_ccm_aes_alg = { - .cra_name = "ccm(aes)", - .cra_driver_name = "ccm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_ccm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = ccm_aes_nx_set_key, - .setauthsize = ccm_aes_nx_setauthsize, - .encrypt = ccm_aes_nx_encrypt, - .decrypt = ccm_aes_nx_decrypt, - } +struct aead_alg nx_ccm_aes_alg = { + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_ccm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm_aes_nx_set_key, + .setauthsize = ccm_aes_nx_setauthsize, + .encrypt = ccm_aes_nx_encrypt, + .decrypt = ccm_aes_nx_decrypt, }; -struct crypto_alg nx_ccm4309_aes_alg = { - .cra_name = "rfc4309(ccm(aes))", - .cra_driver_name = "rfc4309-ccm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_nivaead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_ccm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = ccm4309_aes_nx_set_key, - .setauthsize = ccm4309_aes_nx_setauthsize, - .encrypt = ccm4309_aes_nx_encrypt, - .decrypt = ccm4309_aes_nx_decrypt, - 
.geniv = "seqiv", - } +struct aead_alg nx_ccm4309_aes_alg = { + .base = { + .cra_name = "rfc4309(ccm(aes))", + .cra_driver_name = "rfc4309-ccm-aes-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_ccm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm4309_aes_nx_set_key, + .setauthsize = ccm4309_aes_nx_setauthsize, + .encrypt = ccm4309_aes_nx_encrypt, + .decrypt = ccm4309_aes_nx_decrypt, }; diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index dd7e9f3f5..898c0a280 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, return ctr_aes_nx_crypt(desc, dst, src, nbytes); } -struct crypto_alg nx_ctr_aes_alg = { - .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_ctr_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = ctr_aes_nx_set_key, - .encrypt = ctr_aes_nx_crypt, - .decrypt = ctr_aes_nx_crypt, - } -}; - struct crypto_alg nx_ctr3686_aes_alg = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "rfc3686-ctr-aes-nx", diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 92c993f08..eee624f58 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -21,11 +21,9 @@ #include #include -#include #include #include #include -#include #include #include "nx_csbcpb.h" @@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; @@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); char *nonce = nx_ctx->priv.gcm.nonce; int rc; @@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, - u8 *out) + u8 *out, + unsigned int assoclen) { int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; - unsigned int nbytes = req->assoclen; + unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; @@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); @@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, return rc; } -static int gmac(struct aead_request *req, struct blkcipher_desc *desc) +static int gmac(struct aead_request *req, struct blkcipher_desc 
*desc, + unsigned int assoclen) { int rc; - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_crypto_ctx *nx_ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *nx_sg; - unsigned int nbytes = req->assoclen; + unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; @@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc) NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); - atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); + atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); @@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, int enc) { int rc; - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_crypto_ctx *nx_ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; char out[AES_BLOCK_SIZE]; struct nx_sg *in_sg, *out_sg; @@ -314,9 +316,11 @@ out: return rc; } -static int gcm_aes_nx_crypt(struct aead_request *req, int enc) +static int gcm_aes_nx_crypt(struct aead_request *req, int enc, + unsigned int assoclen) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_crypto_ctx *nx_ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; @@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; if (nbytes == 0) { - if (req->assoclen == 0) + if (assoclen == 0) rc = gcm_empty(req, &desc, enc); else - rc = gmac(req, &desc); + rc = gmac(req, &desc, assoclen); if (rc) goto out; else @@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) } /* Process associated data */ - csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; - if (req->assoclen) { - rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); + csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8; + if (assoclen) { + rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad, + assoclen); if (rc) goto out; } @@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) to_process = nbytes - processed; csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; - desc.tfm = (struct crypto_blkcipher *) req->base.tfm; rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, &to_process, processed + req->assoclen, @@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req) memcpy(iv, req->iv, 12); - return gcm_aes_nx_crypt(req, 1); + return gcm_aes_nx_crypt(req, 1, req->assoclen); } static int gcm_aes_nx_decrypt(struct aead_request *req) @@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req) memcpy(iv, req->iv, 12); - return gcm_aes_nx_crypt(req, 0); + return gcm_aes_nx_crypt(req, 0, req->assoclen); } static int gcm4106_aes_nx_encrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_crypto_ctx *nx_ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; @@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req) memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); - return gcm_aes_nx_crypt(req, 1); + 
if (req->assoclen < 8) + return -EINVAL; + + return gcm_aes_nx_crypt(req, 1, req->assoclen - 8); } static int gcm4106_aes_nx_decrypt(struct aead_request *req) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); + struct nx_crypto_ctx *nx_ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; @@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req) memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); - return gcm_aes_nx_crypt(req, 0); + if (req->assoclen < 8) + return -EINVAL; + + return gcm_aes_nx_crypt(req, 0, req->assoclen - 8); } /* tell the block cipher walk routines that this is a stream cipher by diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 436971343..0794f1cc0 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -596,13 +596,9 @@ static int nx_register_algs(void) if (rc) goto out_unreg_ecb; - rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); - if (rc) - goto out_unreg_cbc; - rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) - goto out_unreg_ctr; + goto out_unreg_cbc; rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) @@ -612,11 +608,11 @@ static int nx_register_algs(void) if (rc) goto out_unreg_gcm; - rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_gcm4106; - rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_ccm; @@ -644,17 +640,15 @@ out_unreg_s256: nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); out_unreg_ccm4309: - nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_ccm: - nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_gcm4106: nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_gcm: nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_ctr3686: nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); -out_unreg_ctr: - nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_cbc: nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); out_unreg_ecb: @@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) } /* entry points from the crypto tfm initializers */ -int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm) { - crypto_aead_set_reqsize(__crypto_aead_cast(tfm), - sizeof(struct nx_ccm_rctx)); - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx)); + return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_CCM); } @@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev) NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); - nx_unregister_alg(&nx_ccm4309_aes_alg, - NX_FC_AES, NX_MODE_AES_CCM); - nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_aead(&nx_ccm4309_aes_alg, + NX_FC_AES, 
NX_MODE_AES_CCM); + nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); - nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); } diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index cdff03a42..9347878d4 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -149,8 +149,10 @@ struct nx_crypto_ctx { } priv; }; +struct crypto_aead; + /* prototypes */ -int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); @@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg; extern struct crypto_alg nx_ecb_aes_alg; extern struct aead_alg nx_gcm_aes_alg; extern struct aead_alg nx_gcm4106_aes_alg; -extern struct crypto_alg nx_ctr_aes_alg; extern struct crypto_alg nx_ctr3686_aes_alg; -extern struct crypto_alg nx_ccm_aes_alg; -extern struct crypto_alg nx_ccm4309_aes_alg; +extern struct aead_alg nx_ccm_aes_alg; +extern struct aead_alg nx_ccm4309_aes_alg; extern struct shash_alg nx_shash_aes_xcbc_alg; extern struct shash_alg nx_shash_sha512_alg; extern struct shash_alg nx_shash_sha256_alg; diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 9a28b7e07..eba23147c 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -52,29 +52,30 @@ #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) -#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7) -#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7) -#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7) -#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7) -#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7) -#define AES_REG_CTRL_CTR (1 << 6) -#define AES_REG_CTRL_CBC (1 << 5) -#define AES_REG_CTRL_KEY_SIZE (3 << 3) -#define AES_REG_CTRL_DIRECTION (1 << 2) -#define AES_REG_CTRL_INPUT_READY (1 << 1) -#define AES_REG_CTRL_OUTPUT_READY (1 << 0) +#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7) +#define AES_REG_CTRL_CTR_WIDTH_32 0 +#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7) +#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8) +#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7) +#define AES_REG_CTRL_CTR BIT(6) +#define AES_REG_CTRL_CBC BIT(5) +#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3) +#define AES_REG_CTRL_DIRECTION BIT(2) +#define AES_REG_CTRL_INPUT_READY BIT(1) +#define AES_REG_CTRL_OUTPUT_READY BIT(0) +#define AES_REG_CTRL_MASK GENMASK(24, 2) #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) -#define AES_REG_MASK_SIDLE (1 << 6) -#define AES_REG_MASK_START (1 << 5) -#define AES_REG_MASK_DMA_OUT_EN (1 << 3) -#define AES_REG_MASK_DMA_IN_EN (1 << 2) -#define AES_REG_MASK_SOFTRESET (1 << 1) -#define AES_REG_AUTOIDLE (1 << 0) +#define AES_REG_MASK_SIDLE BIT(6) +#define AES_REG_MASK_START BIT(5) +#define AES_REG_MASK_DMA_OUT_EN BIT(3) +#define AES_REG_MASK_DMA_IN_EN BIT(2) +#define AES_REG_MASK_SOFTRESET BIT(1) +#define AES_REG_AUTOIDLE BIT(0) #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) @@ -254,7 +255,7 @@ static int 
omap_aes_write_ctrl(struct omap_aes_dev *dd) { unsigned int key32; int i, err; - u32 val, mask = 0; + u32 val; err = omap_aes_hw_init(dd); if (err) @@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); if (dd->flags & FLAGS_CBC) val |= AES_REG_CTRL_CBC; - if (dd->flags & FLAGS_CTR) { + if (dd->flags & FLAGS_CTR) val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; - mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; - } + if (dd->flags & FLAGS_ENCRYPT) val |= AES_REG_CTRL_DIRECTION; - mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | - AES_REG_CTRL_KEY_SIZE; - - omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask); + omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK); return 0; } @@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total) { int len = 0; + if (!IS_ALIGNED(total, AES_BLOCK_SIZE)) + return -EINVAL; + while (sg) { if (!IS_ALIGNED(sg->offset, 4)) return -1; @@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total) static int omap_aes_copy_sgs(struct omap_aes_dev *dd) { void *buf_in, *buf_out; - int pages; + int pages, total; - pages = get_order(dd->total); + total = ALIGN(dd->total, AES_BLOCK_SIZE); + pages = get_order(total); buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); @@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd) sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); sg_init_table(&dd->in_sgl, 1); - sg_set_buf(&dd->in_sgl, buf_in, dd->total); + sg_set_buf(&dd->in_sgl, buf_in, total); dd->in_sg = &dd->in_sgl; sg_init_table(&dd->out_sgl, 1); - sg_set_buf(&dd->out_sgl, buf_out, dd->total); + sg_set_buf(&dd->out_sgl, buf_out, total); dd->out_sg = &dd->out_sgl; return 0; @@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, struct omap_aes_ctx *ctx; struct omap_aes_reqctx *rctx; unsigned long flags; - int err, ret = 0; + int err, ret = 0, len; spin_lock_irqsave(&dd->lock, flags); if (req) @@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, dd->sgs_copied = 0; } - dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); - dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); + len = ALIGN(dd->total, AES_BLOCK_SIZE); + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len); + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len); BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); rctx = ablkcipher_request_ctx(req); @@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; void *buf_in, *buf_out; - int pages; + int pages, len; pr_debug("enter done_task\n"); @@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data) sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); - pages = get_order(dd->total_save); + len = ALIGN(dd->total_save, AES_BLOCK_SIZE); + pages = get_order(len); free_pages((unsigned long)buf_in, pages); free_pages((unsigned long)buf_out, pages); } @@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); - if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { - pr_err("request size is not exact amount of AES blocks\n"); - return -EINVAL; - } - dd = omap_aes_find_dev(ctx); if (!dd) return -ENODEV; @@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = { { .cra_name = "ecb(aes)", 
.cra_driver_name = "ecb-aes-omap", - .cra_priority = 100, + .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, @@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-omap", - .cra_priority = 100, + .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, @@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = { { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-omap", - .cra_priority = 100, + .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, @@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) } } - dd->total -= AES_BLOCK_SIZE; - - BUG_ON(dd->total < 0); + dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total); /* Clear IRQ status */ status &= ~AES_REG_IRQ_DATA_OUT; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index b2024c95a..48adb2a09 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, * the dmaengine may try to DMA the incorrect amount of data. */ sg_init_table(&ctx->sgl, 1); - ctx->sgl.page_link = ctx->sg->page_link; + sg_assign_page(&ctx->sgl, sg_page(ctx->sg)); ctx->sgl.offset = ctx->sg->offset; sg_dma_len(&ctx->sgl) = len32; sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 4f56f3681..da36de26a 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -99,11 +99,16 @@ struct spacc_req { dma_addr_t src_addr, dst_addr; struct spacc_ddt *src_ddt, *dst_ddt; void (*complete)(struct spacc_req *req); +}; - /* AEAD specific bits. */ - u8 *giv; - size_t giv_len; - dma_addr_t giv_pa; +struct spacc_aead { + unsigned long ctrl_default; + unsigned long type; + struct aead_alg alg; + struct spacc_engine *engine; + struct list_head entry; + int key_offs; + int iv_offs; }; struct spacc_engine { @@ -121,6 +126,9 @@ struct spacc_engine { struct spacc_alg *algs; unsigned num_algs; struct list_head registered_algs; + struct spacc_aead *aeads; + unsigned num_aeads; + struct list_head registered_aeads; size_t cipher_pg_sz; size_t hash_pg_sz; const char *name; @@ -174,8 +182,6 @@ struct spacc_aead_ctx { u8 cipher_key_len; u8 hash_key_len; struct crypto_aead *sw_cipher; - size_t auth_size; - u8 salt[AES_BLOCK_SIZE]; }; static int spacc_ablk_submit(struct spacc_req *req); @@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) return alg ? 
container_of(alg, struct spacc_alg, alg) : NULL; } +static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg) +{ + return container_of(alg, struct spacc_aead, alg); +} + static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) { u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); @@ -310,120 +321,117 @@ out: return NULL; } -static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) +static int spacc_aead_make_ddts(struct aead_request *areq) { - struct aead_request *areq = container_of(req->req, struct aead_request, - base); + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct spacc_req *req = aead_request_ctx(areq); struct spacc_engine *engine = req->engine; struct spacc_ddt *src_ddt, *dst_ddt; - unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); - unsigned nents = sg_count(areq->src, areq->cryptlen); unsigned total; - dma_addr_t iv_addr; + unsigned int src_nents, dst_nents; struct scatterlist *cur; - int i, dst_ents, src_ents, assoc_ents; - u8 *iv = giv ? giv : areq->iv; + int i, dst_ents, src_ents; + + total = areq->assoclen + areq->cryptlen; + if (req->is_encrypt) + total += crypto_aead_authsize(aead); + + src_nents = sg_count(areq->src, total); + if (src_nents + 1 > MAX_DDT_LEN) + return -E2BIG; + + dst_nents = 0; + if (areq->src != areq->dst) { + dst_nents = sg_count(areq->dst, total); + if (src_nents + 1 > MAX_DDT_LEN) + return -E2BIG; + } src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); if (!src_ddt) - return -ENOMEM; + goto err; dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); - if (!dst_ddt) { - dma_pool_free(engine->req_pool, src_ddt, req->src_addr); - return -ENOMEM; - } + if (!dst_ddt) + goto err_free_src; req->src_ddt = src_ddt; req->dst_ddt = dst_ddt; - assoc_ents = dma_map_sg(engine->dev, areq->assoc, - sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); - if (areq->src != areq->dst) { - src_ents = dma_map_sg(engine->dev, areq->src, nents, + if (dst_nents) { + src_ents = dma_map_sg(engine->dev, areq->src, src_nents, DMA_TO_DEVICE); - dst_ents = dma_map_sg(engine->dev, areq->dst, nents, + if (!src_ents) + goto err_free_dst; + + dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents, DMA_FROM_DEVICE); + + if (!dst_ents) { + dma_unmap_sg(engine->dev, areq->src, src_nents, + DMA_TO_DEVICE); + goto err_free_dst; + } } else { - src_ents = dma_map_sg(engine->dev, areq->src, nents, + src_ents = dma_map_sg(engine->dev, areq->src, src_nents, DMA_BIDIRECTIONAL); - dst_ents = 0; + if (!src_ents) + goto err_free_dst; + dst_ents = src_ents; } /* - * Map the IV/GIV. For the GIV it needs to be bidirectional as it is - * formed by the crypto block and sent as the ESP IV for IPSEC. + * Now map in the payload for the source and destination and terminate + * with the NULL pointers. */ - iv_addr = dma_map_single(engine->dev, iv, ivsize, - giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - req->giv_pa = iv_addr; + for_each_sg(areq->src, cur, src_ents, i) + ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); - /* - * Map the associated data. For decryption we don't copy the - * associated data. - */ - total = areq->assoclen; - for_each_sg(areq->assoc, cur, assoc_ents, i) { + /* For decryption we need to skip the associated data. */ + total = req->is_encrypt ? 
0 : areq->assoclen; + for_each_sg(areq->dst, cur, dst_ents, i) { unsigned len = sg_dma_len(cur); - if (len > total) - len = total; - - total -= len; + if (len <= total) { + total -= len; + continue; + } - ddt_set(src_ddt++, sg_dma_address(cur), len); - if (req->is_encrypt) - ddt_set(dst_ddt++, sg_dma_address(cur), len); + ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total); } - ddt_set(src_ddt++, iv_addr, ivsize); - - if (giv || req->is_encrypt) - ddt_set(dst_ddt++, iv_addr, ivsize); - - /* - * Now map in the payload for the source and destination and terminate - * with the NULL pointers. - */ - for_each_sg(areq->src, cur, src_ents, i) { - ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); - if (areq->src == areq->dst) - ddt_set(dst_ddt++, sg_dma_address(cur), - sg_dma_len(cur)); - } - - for_each_sg(areq->dst, cur, dst_ents, i) - ddt_set(dst_ddt++, sg_dma_address(cur), - sg_dma_len(cur)); ddt_set(src_ddt, 0, 0); ddt_set(dst_ddt, 0, 0); return 0; + +err_free_dst: + dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr); +err_free_src: + dma_pool_free(engine->req_pool, src_ddt, req->src_addr); +err: + return -ENOMEM; } static void spacc_aead_free_ddts(struct spacc_req *req) { struct aead_request *areq = container_of(req->req, struct aead_request, base); - struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); - struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + unsigned total = areq->assoclen + areq->cryptlen + + (req->is_encrypt ? crypto_aead_authsize(aead) : 0); + struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); struct spacc_engine *engine = aead_ctx->generic.engine; - unsigned ivsize = alg->alg.cra_aead.ivsize; - unsigned nents = sg_count(areq->src, areq->cryptlen); + unsigned nents = sg_count(areq->src, total); if (areq->src != areq->dst) { dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); dma_unmap_sg(engine->dev, areq->dst, - sg_count(areq->dst, areq->cryptlen), + sg_count(areq->dst, total), DMA_FROM_DEVICE); } else dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); - dma_unmap_sg(engine->dev, areq->assoc, - sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); - - dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); - dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); } @@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, dma_pool_free(req->engine->req_pool, ddt, ddt_addr); } -/* - * Set key for a DES operation in an AEAD cipher. This also performs weak key - * checking if required. - */ -static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int len) -{ - struct crypto_tfm *tfm = crypto_aead_tfm(aead); - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); - u32 tmp[DES_EXPKEY_WORDS]; - - if (unlikely(!des_ekey(tmp, key)) && - (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) { - tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; - } - - memcpy(ctx->cipher_key, key, len); - ctx->cipher_key_len = len; - - return 0; -} - -/* Set the key for the AES block cipher component of the AEAD transform. */ -static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int len) -{ - struct crypto_tfm *tfm = crypto_aead_tfm(aead); - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); - - /* - * IPSec engine only supports 128 and 256 bit AES keys. 
If we get a - * request for any other size (192 bits) then we need to do a software - * fallback. - */ - if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { - /* - * Set the fallback transform to use the same request flags as - * the hardware transform. - */ - ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - ctx->sw_cipher->base.crt_flags |= - tfm->crt_flags & CRYPTO_TFM_REQ_MASK; - return crypto_aead_setkey(ctx->sw_cipher, key, len); - } - - memcpy(ctx->cipher_key, key, len); - ctx->cipher_key_len = len; - - return 0; -} - static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); struct crypto_authenc_keys keys; - int err = -EINVAL; + int err; + + crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_aead_setkey(ctx->sw_cipher, key, keylen); + crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK); + crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) & + CRYPTO_TFM_RES_MASK); + if (err) + return err; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; @@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, if (keys.authkeylen > sizeof(ctx->hash_ctx)) goto badkey; - if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == - SPA_CTRL_CIPH_ALG_AES) - err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); - else - err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); - - if (err) - goto badkey; + memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen); + ctx->cipher_key_len = keys.enckeylen; memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); ctx->hash_key_len = keys.authkeylen; @@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm, { struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); - ctx->auth_size = authsize; - - return 0; + return crypto_aead_setauthsize(ctx->sw_cipher, authsize); } /* @@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm, * be completed in hardware because the hardware may not support certain key * sizes. In these cases we need to complete the request in software. */ -static int spacc_aead_need_fallback(struct spacc_req *req) +static int spacc_aead_need_fallback(struct aead_request *aead_req) { - struct aead_request *aead_req; - struct crypto_tfm *tfm = req->req->tfm; - struct crypto_alg *alg = req->req->tfm->__crt_alg; - struct spacc_alg *spacc_alg = to_spacc_alg(alg); - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); + struct aead_alg *alg = crypto_aead_alg(aead); + struct spacc_aead *spacc_alg = to_spacc_aead(alg); + struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead); - aead_req = container_of(req->req, struct aead_request, base); /* * If we have a non-supported key-length, then we need to do a * software fallback. @@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, { struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); - int err; + struct aead_request *subreq = aead_request_ctx(req); - if (ctx->sw_cipher) { - /* - * Change the request to use the software fallback transform, - * and once the ciphering has completed, put the old transform - * back into the request. 
- */ - aead_request_set_tfm(req, ctx->sw_cipher); - err = is_encrypt ? crypto_aead_encrypt(req) : - crypto_aead_decrypt(req); - aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); - } else - err = -EINVAL; + aead_request_set_tfm(subreq, ctx->sw_cipher); + aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); - return err; + return is_encrypt ? crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); } static void spacc_aead_complete(struct spacc_req *req) @@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req) static int spacc_aead_submit(struct spacc_req *req) { - struct crypto_tfm *tfm = req->req->tfm; - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_alg *alg = req->req->tfm->__crt_alg; - struct spacc_alg *spacc_alg = to_spacc_alg(alg); - struct spacc_engine *engine = ctx->generic.engine; - u32 ctrl, proc_len, assoc_len; struct aead_request *aead_req = container_of(req->req, struct aead_request, base); + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); + unsigned int authsize = crypto_aead_authsize(aead); + struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead); + struct aead_alg *alg = crypto_aead_alg(aead); + struct spacc_aead *spacc_alg = to_spacc_aead(alg); + struct spacc_engine *engine = ctx->generic.engine; + u32 ctrl, proc_len, assoc_len; req->result = -EINPROGRESS; req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, - ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize, + ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead), ctx->hash_ctx, ctx->hash_key_len); /* Set the source and destination DDT pointers. */ @@ -616,26 +567,16 @@ static int spacc_aead_submit(struct spacc_req *req) assoc_len = aead_req->assoclen; proc_len = aead_req->cryptlen + assoc_len; - /* - * If we aren't generating an IV, then we need to include the IV in the - * associated data so that it is included in the hash. - */ - if (!req->giv) { - assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); - proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); - } else - proc_len += req->giv_len; - /* * If we are decrypting, we need to take the length of the ICV out of * the processing length. */ if (!req->is_encrypt) - proc_len -= ctx->auth_size; + proc_len -= authsize; writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET); writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET); - writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET); + writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET); writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); @@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine) /* * Setup an AEAD request for processing. This will configure the engine, load * the context and then start the packet processing. - * - * @giv Pointer to destination address for a generated IV. If the - * request does not need to generate an IV then this should be set to NULL. 
*/ -static int spacc_aead_setup(struct aead_request *req, u8 *giv, +static int spacc_aead_setup(struct aead_request *req, unsigned alg_type, bool is_encrypt) { - struct crypto_alg *alg = req->base.tfm->__crt_alg; - struct spacc_engine *engine = to_spacc_alg(alg)->engine; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct aead_alg *alg = crypto_aead_alg(aead); + struct spacc_engine *engine = to_spacc_aead(alg)->engine; struct spacc_req *dev_req = aead_request_ctx(req); - int err = -EINPROGRESS; + int err; unsigned long flags; - unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); - dev_req->giv = giv; - dev_req->giv_len = ivsize; dev_req->req = &req->base; dev_req->is_encrypt = is_encrypt; dev_req->result = -EBUSY; dev_req->engine = engine; dev_req->complete = spacc_aead_complete; - if (unlikely(spacc_aead_need_fallback(dev_req))) + if (unlikely(spacc_aead_need_fallback(req) || + ((err = spacc_aead_make_ddts(req)) == -E2BIG))) return spacc_aead_do_fallback(req, alg_type, is_encrypt); - spacc_aead_make_ddts(dev_req, dev_req->giv); + if (err) + goto out; err = -EINPROGRESS; spin_lock_irqsave(&engine->hw_lock, flags); @@ -728,70 +666,44 @@ out: static int spacc_aead_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_tfm *tfm = crypto_aead_tfm(aead); - struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead)); - return spacc_aead_setup(req, NULL, alg->type, 1); -} - -static int spacc_aead_givencrypt(struct aead_givcrypt_request *req) -{ - struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); - struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); - size_t ivsize = crypto_aead_ivsize(tfm); - struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); - unsigned len; - __be64 seq; - - memcpy(req->areq.iv, ctx->salt, ivsize); - len = ivsize; - if (ivsize > sizeof(u64)) { - memset(req->giv, 0, ivsize - sizeof(u64)); - len = sizeof(u64); - } - seq = cpu_to_be64(req->seq); - memcpy(req->giv + ivsize - len, &seq, len); - - return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); + return spacc_aead_setup(req, alg->type, 1); } static int spacc_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_tfm *tfm = crypto_aead_tfm(aead); - struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); + struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead)); - return spacc_aead_setup(req, NULL, alg->type, 0); + return spacc_aead_setup(req, alg->type, 0); } /* * Initialise a new AEAD context. This is responsible for allocating the * fallback cipher and initialising the context. 
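The init path that follows sizes the per-request context as the larger of the driver's own state and a nested aead_request plus the fallback's reqsize, which is what lets aead_request_ctx() double as the sub-request shown earlier. A short hedged sketch of just that sizing rule (helper name hypothetical; needs <crypto/internal/aead.h>):

/* Sketch only: reserve room for either the HW path state or a sub-request. */
static int aead_init_reqsize_sketch(struct crypto_aead *tfm,
                                    struct crypto_aead *sw_cipher,
                                    size_t driver_req_size)
{
        crypto_aead_set_reqsize(tfm,
                                max(driver_req_size,
                                    sizeof(struct aead_request) +
                                    crypto_aead_reqsize(sw_cipher)));
        return 0;
}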
*/ -static int spacc_aead_cra_init(struct crypto_tfm *tfm) +static int spacc_aead_cra_init(struct crypto_aead *tfm) { - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_alg *alg = tfm->__crt_alg; - struct spacc_alg *spacc_alg = to_spacc_alg(alg); + struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *alg = crypto_aead_alg(tfm); + struct spacc_aead *spacc_alg = to_spacc_aead(alg); struct spacc_engine *engine = spacc_alg->engine; ctx->generic.flags = spacc_alg->type; ctx->generic.engine = engine; - ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0, - CRYPTO_ALG_ASYNC | + ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->sw_cipher)) { - dev_warn(engine->dev, "failed to allocate fallback for %s\n", - alg->cra_name); - ctx->sw_cipher = NULL; - } + if (IS_ERR(ctx->sw_cipher)) + return PTR_ERR(ctx->sw_cipher); ctx->generic.key_offs = spacc_alg->key_offs; ctx->generic.iv_offs = spacc_alg->iv_offs; - get_random_bytes(ctx->salt, sizeof(ctx->salt)); - - crypto_aead_set_reqsize(__crypto_aead_cast(tfm), - sizeof(struct spacc_req)); + crypto_aead_set_reqsize( + tfm, + max(sizeof(struct spacc_req), + sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->sw_cipher))); return 0; } @@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm) * Destructor for an AEAD context. This is called when the transform is freed * and must free the fallback cipher. */ -static void spacc_aead_cra_exit(struct crypto_tfm *tfm) +static void spacc_aead_cra_exit(struct crypto_aead *tfm) { - struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); - if (ctx->sw_cipher) - crypto_free_aead(ctx->sw_cipher); - ctx->sw_cipher = NULL; + crypto_free_aead(ctx->sw_cipher); } /* @@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = { .cra_exit = spacc_ablk_cra_exit, }, }, +}; + +static struct spacc_aead ipsec_engine_aeads[] = { { - .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | - SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | + SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA | + SPA_CTRL_HASH_MODE_HMAC, .key_offs = 0, .iv_offs = AES_MAX_KEY_SIZE, .alg = { - .cra_name = "authenc(hmac(sha1),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", - .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = 
SHA1_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, { - .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | + SPA_CTRL_CIPH_MODE_CBC | SPA_CTRL_HASH_ALG_SHA256 | SPA_CTRL_HASH_MODE_HMAC, .key_offs = 0, .iv_offs = AES_MAX_KEY_SIZE, .alg = { - .cra_name = "authenc(hmac(sha256),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", - .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, { .key_offs = 0, .iv_offs = AES_MAX_KEY_SIZE, - .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | - SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | + SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_MD5 | + SPA_CTRL_HASH_MODE_HMAC, .alg = { - .cra_name = "authenc(hmac(md5),cbc(aes))", - .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", - .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, { .key_offs = DES_BLOCK_SIZE, .iv_offs = 0, - .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | - SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | + 
SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_SHA | + SPA_CTRL_HASH_MODE_HMAC, .alg = { - .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", - .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, { .key_offs = DES_BLOCK_SIZE, .iv_offs = 0, - .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | + .ctrl_default = SPA_CTRL_CIPH_ALG_AES | + SPA_CTRL_CIPH_MODE_CBC | SPA_CTRL_HASH_ALG_SHA256 | SPA_CTRL_HASH_MODE_HMAC, .alg = { - .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", - .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, { .key_offs = DES_BLOCK_SIZE, .iv_offs = 0, - .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | - SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, + .ctrl_default = SPA_CTRL_CIPH_ALG_DES | + SPA_CTRL_CIPH_MODE_CBC | + SPA_CTRL_HASH_ALG_MD5 | + SPA_CTRL_HASH_MODE_HMAC, .alg = { - .cra_name = "authenc(hmac(md5),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", - .cra_priority = 
SPACC_CRYPTO_ALG_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct spacc_aead_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .setkey = spacc_aead_setkey, - .setauthsize = spacc_aead_setauthsize, - .encrypt = spacc_aead_encrypt, - .decrypt = spacc_aead_decrypt, - .givencrypt = spacc_aead_givencrypt, - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-3des-picoxcell", + .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct spacc_aead_ctx), + .cra_module = THIS_MODULE, }, - .cra_init = spacc_aead_cra_init, - .cra_exit = spacc_aead_cra_exit, + .setkey = spacc_aead_setkey, + .setauthsize = spacc_aead_setauthsize, + .encrypt = spacc_aead_encrypt, + .decrypt = spacc_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + .init = spacc_aead_cra_init, + .exit = spacc_aead_cra_exit, }, }, }; @@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev) engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; engine->algs = ipsec_engine_algs; engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); + engine->aeads = ipsec_engine_aeads; + engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads); } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; @@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev) engine->algs[i].alg.cra_name); } + INIT_LIST_HEAD(&engine->registered_aeads); + for (i = 0; i < engine->num_aeads; ++i) { + engine->aeads[i].engine = engine; + err = crypto_register_aead(&engine->aeads[i].alg); + if (!err) { + list_add_tail(&engine->aeads[i].entry, + &engine->registered_aeads); + ret = 0; + } + if (err) + dev_err(engine->dev, "failed to register alg \"%s\"\n", + engine->aeads[i].alg.base.cra_name); + else + dev_dbg(engine->dev, "registered alg \"%s\"\n", + engine->aeads[i].alg.base.cra_name); + } + return ret; } static int spacc_remove(struct platform_device *pdev) { + struct spacc_aead *aead, *an; struct spacc_alg *alg, *next; struct spacc_engine *engine = platform_get_drvdata(pdev); del_timer_sync(&engine->packet_timeout); device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); + list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) { + list_del(&aead->entry); + crypto_unregister_aead(&aead->alg); + } + list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { list_del(&alg->entry); crypto_unregister_alg(&alg->alg); diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 6fdb9e8b2..eefccf7b8 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER + select CRYPTO_AKCIPHER select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 select FW_LOADER + select ASN1 config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" @@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC To compile this as a module, choose M here: the module will be called qat_dh895xcc. 
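The probe and remove changes above follow the usual register-and-track idiom: every AEAD that registers successfully is appended to a driver-private list, probe succeeds if at least one registration did, and teardown walks that list with the _safe iterator because entries are unlinked mid-walk. A self-contained hedged sketch of the same idiom (types reduced to the minimum; needs <linux/list.h> and <crypto/internal/aead.h>):

/* Sketch only: track successful registrations for exact teardown. */
struct tracked_aead {
        struct aead_alg alg;
        struct list_head entry;
};

static int register_all_sketch(struct tracked_aead *aeads, int n,
                               struct list_head *registered)
{
        int i, err, ret = -EINVAL;

        INIT_LIST_HEAD(registered);
        for (i = 0; i < n; i++) {
                err = crypto_register_aead(&aeads[i].alg);
                if (!err) {
                        /* Remember it so remove() unregisters exactly these. */
                        list_add_tail(&aeads[i].entry, registered);
                        ret = 0;
                }
        }
        return ret;     /* 0 if at least one alg registered */
}

static void unregister_all_sketch(struct list_head *registered)
{
        struct tracked_aead *a, *tmp;

        /* _safe variant because list_del() happens while iterating. */
        list_for_each_entry_safe(a, tmp, registered, entry) {
                list_del(&a->entry);
                crypto_unregister_aead(&a->alg);
        }
}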
+ +config CRYPTO_DEV_QAT_DH895xCCVF + tristate "Support for Intel(R) DH895xCC Virtual Function" + depends on X86 && PCI + select PCI_IOV + select CRYPTO_DEV_QAT + + help + Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology + Virtual Function for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_dh895xccvf. diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index d11481be2..a3ce0b70e 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore new file mode 100644 index 000000000..ee328374d --- /dev/null +++ b/drivers/crypto/qat/qat_common/.gitignore @@ -0,0 +1 @@ +*-asn1.[ch] diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index e0424dc38..df20a9de1 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -1,3 +1,6 @@ +$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h +clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h + obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ adf_ctl_drv.o \ @@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \ adf_accel_engine.o \ adf_aer.o \ adf_transport.o \ + adf_admin.o \ + adf_hw_arbiter.o \ qat_crypto.o \ qat_algs.o \ + qat_rsakey-asn1.o \ + qat_asym_algs.o \ qat_uclo.o \ qat_hal.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o +intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 5fe902967..ca853d50b 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -46,13 +46,17 @@ */ #ifndef ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_ +#include #include #include #include +#include #include "adf_cfg_common.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" +#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 +#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 @@ -79,6 +83,7 @@ struct adf_bar { struct adf_accel_msix { struct msix_entry *entries; char **names; + u32 num_entries; } __packed; struct adf_accel_pci { @@ -99,6 +104,7 @@ enum dev_sku_info { DEV_SKU_2, DEV_SKU_3, DEV_SKU_4, + DEV_SKU_VF, DEV_SKU_UNKNOWN, }; @@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info) return "SKU3"; case DEV_SKU_4: return "SKU4"; + case DEV_SKU_VF: + return "SKUVF"; case DEV_SKU_UNKNOWN: default: break; @@ -135,23 +143,29 @@ struct adf_hw_device_data { struct adf_hw_device_class *dev_class; uint32_t (*get_accel_mask)(uint32_t fuse); uint32_t (*get_ae_mask)(uint32_t fuse); + uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_num_aes)(struct adf_hw_device_data *self); uint32_t (*get_num_accels)(struct adf_hw_device_data *self); + uint32_t (*get_pf2vf_offset)(uint32_t i); + uint32_t (*get_vintmsk_offset)(uint32_t i); enum dev_sku_info (*get_sku)(struct 
adf_hw_device_data *self); - void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); - void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); void (*enable_error_correction)(struct adf_accel_dev *accel_dev); int (*init_admin_comms)(struct adf_accel_dev *accel_dev); void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); + int (*send_admin_init)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); + void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, + const uint32_t **cfg); + void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev); + int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); const char *fw_name; - uint32_t pci_dev_id; + const char *fw_mmp_name; uint32_t fuses; uint32_t accel_capabilities_mask; uint16_t accel_mask; @@ -163,6 +177,7 @@ struct adf_hw_device_data { uint8_t num_accel; uint8_t num_logical_accel; uint8_t num_engines; + uint8_t min_iov_compat_ver; } __packed; /* CSR write macro */ @@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle; struct adf_fw_loader_data { struct icp_qat_fw_loader_handle *fw_loader; const struct firmware *uof_fw; + const struct firmware *mmp_fw; +}; + +struct adf_accel_vf_info { + struct adf_accel_dev *accel_dev; + struct tasklet_struct vf2pf_bh_tasklet; + struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ + struct ratelimit_state vf2pf_ratelimit; + u32 vf_nr; + bool init; }; struct adf_accel_dev { @@ -199,6 +224,21 @@ struct adf_accel_dev { struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; + union { + struct { + /* vf_info is non-zero when SR-IOV is init'ed */ + struct adf_accel_vf_info *vf_info; + } pf; + struct { + char *irq_name; + struct tasklet_struct pf2vf_bh_tasklet; + struct mutex vf2pf_lock; /* protect CSR access */ + struct completion iov_msg_completion; + uint8_t compatible; + uint8_t pf_version; + } vf; + }; + bool is_vf; uint8_t accel_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 33afb9f92..64a6f27e2 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; struct adf_hw_device_data *hw_device = accel_dev->hw_device; - void *uof_addr; - uint32_t uof_size; + void *uof_addr, *mmp_addr; + u32 uof_size, mmp_size; + if (!hw_device->fw_name) + return 0; + + if (reject_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name, + &accel_dev->accel_pci_dev.pci_dev->dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n", + hw_device->fw_mmp_name); + return -EFAULT; + } if (reject_firmware(&loader_data->uof_fw, hw_device->fw_name, &accel_dev->accel_pci_dev.pci_dev->dev)) { - dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n", + dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n", hw_device->fw_name); - return -EFAULT; + goto out_err; } uof_size = loader_data->uof_fw->size; uof_addr = (void *)loader_data->uof_fw->data; + mmp_size = loader_data->mmp_fw->size; + mmp_addr = (void *)loader_data->mmp_fw->data; + qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); if (qat_uclo_map_uof_obj(loader_data->fw_loader, 
uof_addr, uof_size)) { dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); goto out_err; } if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { - dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); + dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n"); goto out_err; } return 0; @@ -85,11 +97,17 @@ out_err: void adf_ae_fw_release(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + if (!hw_device->fw_name) + return; qat_uclo_del_uof_obj(loader_data->fw_loader); qat_hal_deinit(loader_data->fw_loader); release_firmware(loader_data->uof_fw); + release_firmware(loader_data->mmp_fw); loader_data->uof_fw = NULL; + loader_data->mmp_fw = NULL; loader_data->fw_loader = NULL; } @@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev) struct adf_hw_device_data *hw_data = accel_dev->hw_device; uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + if (!hw_data->fw_name) + return 0; + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { if (hw_data->ae_mask & (1 << ae)) { qat_hal_start(loader_data->fw_loader, ae, 0xFF); @@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) struct adf_hw_device_data *hw_data = accel_dev->hw_device; uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); + if (!hw_data->fw_name) + return 0; + for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { if (hw_data->ae_mask & (1 << ae)) { qat_hal_stop(loader_data->fw_loader, ae, 0xFF); @@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) int adf_ae_init(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + if (!hw_device->fw_name) + return 0; loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); if (!loader_data) @@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev) int adf_ae_shutdown(struct adf_accel_dev *accel_dev) { struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + if (!hw_device->fw_name) + return 0; qat_hal_deinit(loader_data->fw_loader); kfree(accel_dev->fw_loader); diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c new file mode 100644 index 000000000..147d755fe --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -0,0 +1,290 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "icp_qat_fw_init_admin.h" + +/* Admin Messages Registers */ +#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) +#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) +#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 +#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 +#define ADF_ADMINMSG_LEN 32 + +static const u8 const_tab[1024] = { +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, +0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x23, 0x45, 0x67, 
0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, +0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, +0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e, +0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, +0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, +0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, +0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, +0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, +0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, +0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, +0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, +0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, +0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, +0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, +0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, +0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, +0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, +0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +struct adf_admin_comms { + dma_addr_t phy_addr; + dma_addr_t const_tbl_addr; + void *virt_addr; + void __iomem *mailbox_addr; + struct mutex lock; /* protects adf_admin_comms struct */ +}; + +static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, + void *in, void *out) +{ + struct adf_admin_comms *admin = accel_dev->admin; + int offset = ae * ADF_ADMINMSG_LEN * 2; + void __iomem *mailbox = admin->mailbox_addr; + int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; + int times, received; + + mutex_lock(&admin->lock); + + if (ADF_CSR_RD(mailbox, mb_offset) == 1) { + mutex_unlock(&admin->lock); + return -EAGAIN; + } + + memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); + ADF_CSR_WR(mailbox, mb_offset, 1); + received = 0; + for (times = 0; times < 50; times++) { + msleep(20); + if (ADF_CSR_RD(mailbox, mb_offset) == 0) { + received = 1; + break; + } + } + if (received) + memcpy(out, admin->virt_addr + offset + + ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); + else + dev_err(&GET_DEV(accel_dev), + "Failed to send admin msg to accelerator\n"); + + mutex_unlock(&admin->lock); + return received ? 0 : -EFAULT; +} + +static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req; + struct icp_qat_fw_init_admin_resp resp; + int i; + + memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); + req.init_admin_cmd_id = cmd; + + if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { + req.init_cfg_sz = 1024; + req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; + } + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); + if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || + resp.init_resp_hdr.status) + return -EFAULT; + } + return 0; +} + +/** + * adf_send_admin_init() - Function sends init message to FW + * @accel_dev: Pointer to acceleration device. + * + * Function sends admin init message to the FW + * + * Return: 0 on success, error code otherwise. 
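adf_put_admin_msg_sync() above is a doorbell handshake: the request is copied into the per-AE slot of a shared DMA page, the mailbox CSR is written to 1 to ring the firmware, and the host polls until the firmware clears it back to 0, with a budget of 50 polls at 20 ms each (about one second). A stripped-down hedged sketch of that handshake, using the driver's ADF_CSR_RD/ADF_CSR_WR macros from adf_accel_devices.h and omitting the mutex that serializes mailbox users in the real code:

/* Sketch only: synchronous mailbox doorbell (locking and AE offsets elided). */
static int mailbox_send_sync_sketch(void __iomem *mailbox, int mb_offset,
                                    void *shared, const void *in, void *out,
                                    size_t len)
{
        int tries;

        if (ADF_CSR_RD(mailbox, mb_offset) == 1)
                return -EAGAIN;                 /* firmware still owns the slot */

        memcpy(shared, in, len);                /* request at offset 0 */
        ADF_CSR_WR(mailbox, mb_offset, 1);      /* ring the doorbell */

        /* Poll for completion: 50 tries x 20 ms ~= 1 s budget. */
        for (tries = 0; tries < 50; tries++) {
                msleep(20);
                if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
                        memcpy(out, shared + len, len); /* response follows */
                        return 0;
                }
        }
        return -EFAULT;
}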
+ */ +int adf_send_admin_init(struct adf_accel_dev *accel_dev) +{ + int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); + + if (ret) + return ret; + return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); +} +EXPORT_SYMBOL_GPL(adf_send_admin_init); + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *csr = pmisc->virt_addr; + void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; + u64 reg_val; + + admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); + if (!admin) + return -ENOMEM; + admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &admin->phy_addr, GFP_KERNEL); + if (!admin->virt_addr) { + dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); + kfree(admin); + return -ENOMEM; + } + + admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), + (void *) const_tab, 1024, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), + admin->const_tbl_addr))) { + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + admin->virt_addr, admin->phy_addr); + kfree(admin); + return -ENOMEM; + } + reg_val = (u64)admin->phy_addr; + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); + ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); + mutex_init(&admin->lock); + admin->mailbox_addr = mailbox; + accel_dev->admin = admin; + return 0; +} +EXPORT_SYMBOL_GPL(adf_init_admin_comms); + +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) +{ + struct adf_admin_comms *admin = accel_dev->admin; + + if (!admin) + return; + + if (admin->virt_addr) + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + admin->virt_addr, admin->phy_addr); + + dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024, + DMA_TO_DEVICE); + mutex_destroy(&admin->lock); + kfree(admin); + accel_dev->admin = NULL; +} +EXPORT_SYMBOL_GPL(adf_exit_admin_comms); diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 2dbc733b8..0a5ca0ba5 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -88,9 +88,15 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) struct pci_dev *parent = pdev->bus->self; uint16_t bridge_ctl = 0; + if (accel_dev->is_vf) + return; + dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", accel_dev->accel_id); + if (!parent) + parent = pdev; + if (!pci_wait_for_pending_transaction(pdev)) dev_info(&GET_DEV(accel_dev), "Transaction still in progress. Proceeding\n"); @@ -206,7 +212,7 @@ static struct pci_error_handlers adf_err_handler = { * QAT acceleration device accel_dev. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) { diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index ab65bc274..d08797905 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = { * The table stores device specific config values. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. 
+ * Return: 0 on success, error code otherwise. */ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) { @@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) { struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; + if (!dev_cfg_data) + return; + down_write(&dev_cfg_data->lock); adf_cfg_section_del_all(&dev_cfg_data->sec_list); up_write(&dev_cfg_data->lock); @@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, * in the given acceleration device * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, const char *section_name, @@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); * will be stored. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) { diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 88b82187a..c697fb1cd 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -60,7 +60,7 @@ #define ADF_CFG_NO_DEVICE 0xFF #define ADF_CFG_AFFINITY_WHATEVER 0xFF #define MAX_DEVICE_NAME_SIZE 32 -#define ADF_MAX_DEVICES 32 +#define ADF_MAX_DEVICES (32 * 32) enum adf_cfg_val_type { ADF_DEC, @@ -71,6 +71,7 @@ enum adf_cfg_val_type { enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, + DEV_DH895XCCVF, }; struct adf_dev_status_info { diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 27e16c092..7836dffc3 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -54,8 +54,8 @@ #include "icp_qat_hal.h" #define ADF_MAJOR_VERSION 0 -#define ADF_MINOR_VERSION 1 -#define ADF_BUILD_VERSION 3 +#define ADF_MINOR_VERSION 2 +#define ADF_BUILD_VERSION 0 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ __stringify(ADF_MINOR_VERSION) "." 
\ __stringify(ADF_BUILD_VERSION) @@ -91,9 +91,13 @@ struct service_hndl { unsigned long start_status; char *name; struct list_head list; - int admin; }; +static inline int get_current_node(void) +{ + return topology_physical_package_id(smp_processor_id()); +} + int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); @@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); +void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); +void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); +int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); +void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); +int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); +void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info); +void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); +void adf_clean_vf_map(bool); + int adf_ctl_dev_register(void); void adf_ctl_dev_unregister(void); int adf_processes_dev_register(void); void adf_processes_dev_unregister(void); -int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); -void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, + struct adf_accel_dev *pf); +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, + struct adf_accel_dev *pf); struct list_head *adf_devmgr_get_head(void); struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); struct adf_accel_dev *adf_devmgr_get_first(void); @@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_init_arb(struct adf_accel_dev *accel_dev); +void adf_exit_arb(struct adf_accel_dev *accel_dev); +void adf_update_ring_arb(struct adf_etr_ring_data *ring); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); @@ -141,10 +162,13 @@ int qat_crypto_unregister(void); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); +void qat_alg_asym_callback(void *resp); int qat_algs_init(void); void qat_algs_exit(void); int qat_algs_register(void); int qat_algs_unregister(void); +int qat_asym_algs_register(void); +void qat_asym_algs_unregister(void); int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); @@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, int mem_size); +void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size); +#if defined(CONFIG_PCI_IOV) +int adf_sriov_configure(struct pci_dev *pdev, int numvfs); +void adf_disable_sriov(struct adf_accel_dev *accel_dev); +void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, + uint32_t vf_mask); +void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, + uint32_t vf_mask); +#else +static 
inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) +{ + return 0; +} + +static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) +{ +} +#endif #endif diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index e056b9e9b..cd8a12af8 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, } accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); - if (!accel_dev) { - pr_err("QAT: Device %d not found\n", dev_info.accel_id); + if (!accel_dev) return -ENODEV; - } + hw_data = accel_dev->hw_device; dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; dev_info.num_ae = hw_data->get_num_aes(hw_data); @@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void) adf_exit_aer(); qat_crypto_unregister(); qat_algs_exit(); + adf_clean_vf_map(false); mutex_destroy(&adf_ctl_lock); } diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 3f0ff9e7d..8dfdb8f90 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -50,21 +50,125 @@ #include "adf_common_drv.h" static LIST_HEAD(accel_table); +static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); static uint32_t num_devices; +struct vf_id_map { + u32 bdf; + u32 id; + u32 fake_id; + bool attached; + struct list_head list; +}; + +static int adf_get_vf_id(struct adf_accel_dev *vf) +{ + return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) + + PCI_FUNC(accel_to_pci_dev(vf)->devfn) + + (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)); +} + +static int adf_get_vf_num(struct adf_accel_dev *vf) +{ + return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf); +} + +static struct vf_id_map *adf_find_vf(u32 bdf) +{ + struct list_head *itr; + + list_for_each(itr, &vfs_table) { + struct vf_id_map *ptr = + list_entry(itr, struct vf_id_map, list); + + if (ptr->bdf == bdf) + return ptr; + } + return NULL; +} + +static int adf_get_vf_real_id(u32 fake) +{ + struct list_head *itr; + + list_for_each(itr, &vfs_table) { + struct vf_id_map *ptr = + list_entry(itr, struct vf_id_map, list); + if (ptr->fake_id == fake) + return ptr->id; + } + return -1; +} + +/** + * adf_clean_vf_map() - Cleans VF id mapings + * + * Function cleans internal ids for virtual functions. + * @vf: flag indicating whether mappings is cleaned + * for vfs only or for vfs and pfs + */ +void adf_clean_vf_map(bool vf) +{ + struct vf_id_map *map; + struct list_head *ptr, *tmp; + + mutex_lock(&table_lock); + list_for_each_safe(ptr, tmp, &vfs_table) { + map = list_entry(ptr, struct vf_id_map, list); + if (map->bdf != -1) + num_devices--; + + if (vf && map->bdf == -1) + continue; + + list_del(ptr); + kfree(map); + } + mutex_unlock(&table_lock); +} +EXPORT_SYMBOL_GPL(adf_clean_vf_map); + +/** + * adf_devmgr_update_class_index() - Update internal index + * @hw_data: Pointer to internal device data. 
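The VF numbering in adf_get_vf_id() above is easier to audit once the terms are folded: 7 * (slot - 1) + func + (slot - 1) is simply 8 * (slot - 1) + func, i.e. eight virtual functions per PCI slot, counted from slot 1, and adf_get_vf_num() prefixes that with the bus number. A hedged restatement of the same arithmetic with a worked value in the comments (helper names hypothetical):

#include <linux/pci.h>

/* Sketch only: folded form of adf_get_vf_id()/adf_get_vf_num(). */
static int vf_id_sketch(unsigned int devfn)
{
        /* 7*(slot-1) + func + (slot-1) == 8*(slot-1) + func */
        return 8 * (PCI_SLOT(devfn) - 1) + PCI_FUNC(devfn);
}

static u32 vf_num_sketch(u8 bus, unsigned int devfn)
{
        /* e.g. bus 0x3d, slot 2, func 3 -> id 8*1+3 = 11, num 0x3d0b */
        return (bus << 8) | vf_id_sketch(devfn);
}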
+ * + * Function updates internal dev index for VFs + */ +void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) +{ + struct adf_hw_device_class *class = hw_data->dev_class; + struct list_head *itr; + int i = 0; + + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = + list_entry(itr, struct adf_accel_dev, list); + + if (ptr->hw_device->dev_class == class) + ptr->hw_device->instance_id = i++; + + if (i == class->instances) + break; + } +} +EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); + /** * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework * @accel_dev: Pointer to acceleration device. + * @pf: Corresponding PF if the accel_dev is a VF * * Function adds acceleration device to the acceleration framework. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ -int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) +int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, + struct adf_accel_dev *pf) { struct list_head *itr; + int ret = 0; if (num_devices == ADF_MAX_DEVICES) { dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", @@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) } mutex_lock(&table_lock); - list_for_each(itr, &accel_table) { - struct adf_accel_dev *ptr = + atomic_set(&accel_dev->ref_count, 0); + + /* PF on host or VF on guest */ + if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { + struct vf_id_map *map; + + list_for_each(itr, &accel_table) { + struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); - if (ptr == accel_dev) { - mutex_unlock(&table_lock); - return -EEXIST; + if (ptr == accel_dev) { + ret = -EEXIST; + goto unlock; + } } + + list_add_tail(&accel_dev->list, &accel_table); + accel_dev->accel_id = num_devices++; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + ret = -ENOMEM; + goto unlock; + } + map->bdf = ~0; + map->id = accel_dev->accel_id; + map->fake_id = map->id; + map->attached = true; + list_add_tail(&map->list, &vfs_table); + } else if (accel_dev->is_vf && pf) { + /* VF on host */ + struct adf_accel_vf_info *vf_info; + struct vf_id_map *map; + + vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev); + + map = adf_find_vf(adf_get_vf_num(accel_dev)); + if (map) { + struct vf_id_map *next; + + accel_dev->accel_id = map->id; + list_add_tail(&accel_dev->list, &accel_table); + map->fake_id++; + map->attached = true; + next = list_next_entry(map, list); + while (next && &next->list != &vfs_table) { + next->fake_id++; + next = list_next_entry(next, list); + } + + ret = 0; + goto unlock; + } + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + ret = -ENOMEM; + goto unlock; + } + + accel_dev->accel_id = num_devices++; + list_add_tail(&accel_dev->list, &accel_table); + map->bdf = adf_get_vf_num(accel_dev); + map->id = accel_dev->accel_id; + map->fake_id = map->id; + map->attached = true; + list_add_tail(&map->list, &vfs_table); } - atomic_set(&accel_dev->ref_count, 0); - list_add_tail(&accel_dev->list, &accel_table); - accel_dev->accel_id = num_devices++; +unlock: mutex_unlock(&table_lock); - return 0; + return ret; } EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); @@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void) /** * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. * @accel_dev: Pointer to acceleration device. 
+ * @pf: Corresponding PF if the accel_dev is a VF * * Function removes acceleration device from the acceleration framework. * To be used by QAT device specific drivers. * * Return: void */ -void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) +void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, + struct adf_accel_dev *pf) { mutex_lock(&table_lock); + if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { + num_devices--; + } else if (accel_dev->is_vf && pf) { + struct vf_id_map *map, *next; + + map = adf_find_vf(adf_get_vf_num(accel_dev)); + if (!map) { + dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n"); + goto unlock; + } + map->fake_id--; + map->attached = false; + next = list_next_entry(map, list); + while (next && &next->list != &vfs_table) { + next->fake_id--; + next = list_next_entry(next, list); + } + } +unlock: list_del(&accel_dev->list); - num_devices--; mutex_unlock(&table_lock); } EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); @@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) { struct list_head *itr; + int real_id; mutex_lock(&table_lock); + real_id = adf_get_vf_real_id(id); + if (real_id < 0) + goto unlock; + + id = real_id; + list_for_each(itr, &accel_table) { struct adf_accel_dev *ptr = list_entry(itr, struct adf_accel_dev, list); - if (ptr->accel_id == id) { mutex_unlock(&table_lock); return ptr; } } +unlock: mutex_unlock(&table_lock); return NULL; } @@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id) return -ENODEV; } -void adf_devmgr_get_num_dev(uint32_t *num) +static int adf_get_num_dettached_vfs(void) { struct list_head *itr; + int vfs = 0; - *num = 0; - list_for_each(itr, &accel_table) { - (*num)++; + mutex_lock(&table_lock); + list_for_each(itr, &vfs_table) { + struct vf_id_map *ptr = + list_entry(itr, struct vf_id_map, list); + if (ptr->bdf != ~0 && !ptr->attached) + vfs++; } + mutex_unlock(&table_lock); + return vfs; +} + +void adf_devmgr_get_num_dev(uint32_t *num) +{ + *num = num_devices - adf_get_num_dettached_vfs(); } +/** + * adf_dev_in_use() - Check whether accel_dev is currently in use + * @accel_dev: Pointer to acceleration device. + * + * To be used by QAT device specific drivers. + * + * Return: 1 when device is in use, 0 otherwise. + */ int adf_dev_in_use(struct adf_accel_dev *accel_dev) { return atomic_read(&accel_dev->ref_count) != 0; } +EXPORT_SYMBOL_GPL(adf_dev_in_use); +/** + * adf_dev_get() - Increment accel_dev reference count + * @accel_dev: Pointer to acceleration device. + * + * Increment the accel_dev refcount and if this is the first time + * incrementing it during this period the accel_dev is in use, + * increment the module refcount too. + * To be used by QAT device specific drivers. + * + * Return: 0 when successful, EFAULT when fail to bump module refcount + */ int adf_dev_get(struct adf_accel_dev *accel_dev) { if (atomic_add_return(1, &accel_dev->ref_count) == 1) @@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev) return -EFAULT; return 0; } +EXPORT_SYMBOL_GPL(adf_dev_get); +/** + * adf_dev_put() - Decrement accel_dev reference count + * @accel_dev: Pointer to acceleration device. + * + * Decrement the accel_dev refcount and if this is the last time + * decrementing it during this period the accel_dev is in use, + * decrement the module refcount too. + * To be used by QAT device specific drivers. 
+ * + * Return: void + */ void adf_dev_put(struct adf_accel_dev *accel_dev) { if (atomic_sub_return(1, &accel_dev->ref_count) == 0) module_put(accel_dev->owner); } +EXPORT_SYMBOL_GPL(adf_dev_put); +/** + * adf_devmgr_in_reset() - Check whether device is in reset + * @accel_dev: Pointer to acceleration device. + * + * To be used by QAT device specific drivers. + * + * Return: 1 when the device is being reset, 0 otherwise. + */ int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) { return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); } +EXPORT_SYMBOL_GPL(adf_devmgr_in_reset); +/** + * adf_dev_started() - Check whether device has started + * @accel_dev: Pointer to acceleration device. + * + * To be used by QAT device specific drivers. + * + * Return: 1 when the device has started, 0 otherwise + */ int adf_dev_started(struct adf_accel_dev *accel_dev) { return test_bit(ADF_STATUS_STARTED, &accel_dev->status); } +EXPORT_SYMBOL_GPL(adf_dev_started); diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c new file mode 100644 index 000000000..6849422e0 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -0,0 +1,168 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+	u32 arb, i;
+	const u32 *thd_2_arb_cfg;
+
+	/* Service arb configured for 32 bytes responses and
+	 * ring flow control check enabled. */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+	/* Setup service weighting */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+			WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+	/* Setup ring response ordering */
+	for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+		WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+	/* Setup worker queue registers */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+	/* Map worker threads to service arbiters */
+	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+
+	if (!thd_2_arb_cfg)
+		return -EFAULT;
+
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for/from arbitration.
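+ *
+ * For example (an illustration derived from the CSR macros above, not
+ * code taken from this patch), a ring in bank 0 whose bank has bits 0
+ * and 1 set in ring_mask results in
+ *
+ *	WRITE_CSR_ARB_RINGSRVARBEN(csr, 0, 0x03);
+ *
+ * i.e. a write of 0x03 to offset 0x19C + 0x1000 * 0, enabling
+ * arbitration for rings 0 and 1 and disabling it for the rest.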
+ */ +void adf_update_ring_arb(struct adf_etr_ring_data *ring) +{ + WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, + ring->bank->bank_number, + ring->bank->ring_mask & 0xFF); +} +EXPORT_SYMBOL_GPL(adf_update_ring_arb); + +void adf_exit_arb(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + void __iomem *csr; + unsigned int i; + + if (!accel_dev->transport) + return; + + csr = accel_dev->transport->banks[0].csr_addr; + + /* Reset arbiter configuration */ + for (i = 0; i < ADF_ARB_NUM; i++) + WRITE_CSR_ARB_SARCONFIG(csr, i, 0); + + /* Shutdown work queue */ + for (i = 0; i < hw_data->num_engines; i++) + WRITE_CSR_ARB_WQCFG(csr, i, 0); + + /* Unmap worker threads to service arbiters */ + for (i = 0; i < hw_data->num_engines; i++) + WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); + + /* Disable arbitration on all rings */ + for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) + WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); +} +EXPORT_SYMBOL_GPL(adf_exit_arb); diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 245f43237..ac37a8996 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service) * Function adds the acceleration service to the acceleration framework. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_service_register(struct service_hndl *service) { @@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service) * Function remove the acceleration service from the acceleration framework. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_service_unregister(struct service_hndl *service) { @@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister); * Initialize the ring data structures and the admin comms and arbitration * services. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_dev_init(struct adf_accel_dev *accel_dev) { @@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) */ list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (!service->admin) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { - dev_err(&GET_DEV(accel_dev), - "Failed to initialise service %s\n", - service->name); - return -EFAULT; - } - set_bit(accel_dev->accel_id, &service->init_status); - } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise service %s\n", @@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) } hw_data->enable_error_correction(accel_dev); + hw_data->enable_vf2pf_comms(accel_dev); return 0; } @@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init); * is ready to be used. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. 
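+ *
+ * A typical bring-up and tear-down sequence in a device specific driver
+ * (a sketch only; compare the adf_sriov_configure() hunk later in this
+ * patch) is:
+ *
+ *	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev))
+ *		goto err;
+ *	...
+ *	adf_dev_stop(accel_dev);
+ *	adf_dev_shutdown(accel_dev);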
*/ int adf_dev_start(struct adf_accel_dev *accel_dev) { + struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; @@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); - if (!service->admin) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_START)) { - dev_err(&GET_DEV(accel_dev), - "Failed to start service %s\n", - service->name); - return -EFAULT; - } - set_bit(accel_dev->accel_id, &service->start_status); + if (hw_data->send_admin_init(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to send init message\n"); + return -EFAULT; } + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), "Failed to start service %s\n", @@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status); - if (qat_algs_register()) { + if (!list_empty(&accel_dev->crypto_list) && + (qat_algs_register() || qat_asym_algs_register())) { dev_err(&GET_DEV(accel_dev), "Failed to register crypto algs\n"); set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start); * is shuting down. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_dev_stop(struct adf_accel_dev *accel_dev) { @@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); - if (qat_algs_unregister()) + if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister()) dev_err(&GET_DEV(accel_dev), "Failed to unregister crypto algs\n"); + if (!list_empty(&accel_dev->crypto_list)) + qat_asym_algs_unregister(); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; if (!test_bit(accel_dev->accel_id, &service->start_status)) continue; ret = service->event_hld(accel_dev, ADF_EVENT_STOP); @@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, &service->start_status); } } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); - if (!service->admin) - continue; - if (!test_bit(accel_dev->accel_id, &service->start_status)) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_STOP)) - dev_err(&GET_DEV(accel_dev), - "Failed to shutdown service %s\n", - service->name); - else - clear_bit(accel_dev->accel_id, &service->start_status); - } if (wait) msleep(100); @@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; - if (!test_bit(accel_dev->accel_id, &service->init_status)) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) - dev_err(&GET_DEV(accel_dev), - "Failed to shutdown service %s\n", - service->name); - else - clear_bit(accel_dev->accel_id, &service->init_status); - } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, 
struct service_hndl, list); - if (!service->admin) - continue; if (!test_bit(accel_dev->accel_id, &service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) @@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) if (hw_data->exit_admin_comms) hw_data->exit_admin_comms(accel_dev); + hw_data->disable_iov(accel_dev); adf_cleanup_etr_data(accel_dev); } EXPORT_SYMBOL_GPL(adf_dev_shutdown); @@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) - dev_err(&GET_DEV(accel_dev), - "Failed to restart service %s.\n", - service->name); - } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); - if (!service->admin) - continue; if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); - if (service->admin) - continue; - if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) - dev_err(&GET_DEV(accel_dev), - "Failed to restart service %s.\n", - service->name); - } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); - if (!service->admin) - continue; if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c new file mode 100644 index 000000000..5fdbad809 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -0,0 +1,438 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET	0x3A000
+#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ * @vf_mask: 32-bit mask selecting the VFs whose interrupts to disable
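+ *	(illustration based on the masks above, not part of this patch:
+ *	a vf_mask of 0x00010001 selects VF 1 via ERRMSK3 and VF 17 via
+ *	ERRMSK5)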
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+	u32 val, pf2vf_offset, count = 0;
+	u32 local_in_use_mask, local_in_use_pattern;
+	u32 remote_in_use_mask, remote_in_use_pattern;
+	struct mutex *lock;	/* lock preventing concurrent access of CSR */
+	u32 int_bit;
+	int ret = 0;
+
+	if (accel_dev->is_vf) {
+		pf2vf_offset = hw_data->get_pf2vf_offset(0);
+		lock = &accel_dev->vf.vf2pf_lock;
+		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		int_bit = ADF_VF2PF_INT;
+	} else {
+		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		int_bit = ADF_PF2VF_INT;
+	}
+
+	mutex_lock(lock);
+
+	/* Check if PF2VF CSR is in use by remote function */
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote function\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Attempt to get ownership of PF2VF CSR */
+	msg &= ~local_in_use_mask;
+	msg |= local_in_use_pattern;
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+	/* Wait in case remote func also attempting to get ownership */
+	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & local_in_use_mask) != local_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote - collision detected\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * This function now owns the PF2VF CSR. The IN_USE_BY pattern must
+	 * remain in the PF2VF CSR for all writes including ACK from remote
+	 * until this local function relinquishes the CSR. Send the message
+	 * by interrupting the remote.
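+	 *
+	 * The remote side ACKs by clearing the interrupt bit it was
+	 * signalled with; as a sketch of the request handler further
+	 * below:
+	 *
+	 *	msg = ADF_CSR_RD(pmisc_addr, pf2vf_offset);
+	 *	msg &= ~ADF_VF2PF_INT;
+	 *	ADF_CSR_WR(pmisc_addr, pf2vf_offset, msg);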
+	 */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+	/* Wait for confirmation from remote func it received the message */
+	do {
+		msleep(ADF_IOV_MSG_ACK_DELAY);
+		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+	if (val & int_bit) {
+		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+		val &= ~int_bit;
+		ret = -EIO;
+	}
+
+	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+	mutex_unlock(lock);
+	return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Message to send
+ * @vf_nr: VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	u32 count = 0;
+	int ret;
+
+	do {
+		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+		if (ret)
+			msleep(ADF_IOV_MSG_RETRY_DELAY);
+	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int bar_id = hw_data->get_misc_bar_id(hw_data);
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+	/* Read message from the VF */
+	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+	/* To ACK, clear the VF2PFINT bit */
+	msg &= ~ADF_VF2PF_INT;
+	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) VF2PF messages */
+		goto err;
+
+	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+		{
+		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			 ADF_PF2VF_MSGTYPE_SHIFT) |
+			(ADF_PFVF_COMPATIBILITY_VERSION <<
+			 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Compatibility Version Request from VF%d vers=%u\n",
+			vf_nr + 1, vf_compat_ver);
+
+		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) incompatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) compat with PF (vers %d) unkn.\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else {
+			dev_dbg(&GET_DEV(accel_dev),
+				"VF (vers %d) compatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		}
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Legacy VersionRequest received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			 ADF_PF2VF_MSGTYPE_SHIFT) |
+			(ADF_PFVF_COMPATIBILITY_VERSION <<
+			 ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+		resp |= ADF_PF2VF_VF_COMPATIBLE
<< + ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; + /* Set legacy major and minor version num */ + resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT | + 1 << ADF_PF2VF_MINORVERSION_SHIFT; + break; + case ADF_VF2PF_MSGTYPE_INIT: + { + dev_dbg(&GET_DEV(accel_dev), + "Init message received from VF%d 0x%x\n", + vf_nr + 1, msg); + vf_info->init = true; + } + break; + case ADF_VF2PF_MSGTYPE_SHUTDOWN: + { + dev_dbg(&GET_DEV(accel_dev), + "Shutdown message received from VF%d 0x%x\n", + vf_nr + 1, msg); + vf_info->init = false; + } + break; + default: + goto err; + } + + if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr)) + dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n"); + + /* re-enable interrupt on PF from this VF */ + adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr)); + return; +err: + dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n", + vf_nr + 1, msg); +} + +void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_vf_info *vf; + u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM | + (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT)); + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && adf_iov_putmsg(accel_dev, msg, i)) + dev_err(&GET_DEV(accel_dev), + "Failed to send restarting msg to VF%d\n", i); + } +} + +static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) +{ + unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT); + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 msg = 0; + int ret; + + msg = ADF_VF2PF_MSGORIGIN_SYSTEM; + msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT; + msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT; + BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255); + + /* Send request from VF to PF */ + ret = adf_iov_putmsg(accel_dev, msg, 0); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Compatibility Version Request.\n"); + return ret; + } + + /* Wait for response */ + if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion, + timeout)) { + dev_err(&GET_DEV(accel_dev), + "IOV request/response message timeout expired\n"); + return -EIO; + } + + /* Response from PF received, check compatibility */ + switch (accel_dev->vf.compatible) { + case ADF_PF2VF_VF_COMPATIBLE: + break; + case ADF_PF2VF_VF_COMPAT_UNKNOWN: + /* VF is newer than PF and decides whether it is compatible */ + if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) + break; + /* fall through */ + case ADF_PF2VF_VF_INCOMPATIBLE: + dev_err(&GET_DEV(accel_dev), + "PF (vers %d) and VF (vers %d) are not compatible\n", + accel_dev->vf.pf_version, + ADF_PFVF_COMPATIBILITY_VERSION); + return -EINVAL; + default: + dev_err(&GET_DEV(accel_dev), + "Invalid response from PF; assume not compatible\n"); + return -EINVAL; + } + return ret; +} + +/** + * adf_enable_vf2pf_comms() - Function enables communication from vf to pf + * + * @accel_dev: Pointer to acceleration device virtual function. + * + * Return: 0 on success, error code otherwise. 
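+ *
+ * A VF driver is expected to wire this up in its hw_data ops, e.g.
+ * (sketch, not part of this patch):
+ *
+ *	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+ *
+ * so that the adf_dev_init() hunk above invokes it during bring-up.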
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 000000000..5acd531a1
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of the register (bit 16) gets set an interrupt will be
+ * triggered in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
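+ *
+ * As a worked example (using the definitions below), the Compatibility
+ * Version Request that adf_vf2pf_request_version() builds for version
+ * 0x1 encodes as
+ *
+ *	BIT(17) | (0x6 << 18) | (0x1 << 22) == 0x005A0000
+ *
+ * i.e. Message Origin, Message Type COMPAT_VER_REQ and the version in
+ * the message-specific data bits.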
+ * + * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 + * _______________________________________________ + * | | | | | | | | | | | | | | | | | + * +-----------------------------------------------+ + * \___________________________/ \_________/ ^ ^ + * ^ ^ | | + * | | | VF2PF Int + * | | Message Origin + * | Message Type + * Message-specific Data/Reserved + * + * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 + * _______________________________________________ + * | | | | | | | | | | | | | | | | | + * +-----------------------------------------------+ + * \___________________________/ \_________/ ^ ^ + * ^ ^ | | + * | | | PF2VF Int + * | | Message Origin + * | Message Type + * Message-specific Data/Reserved + * + * Message Origin (Should always be 1) + * A legacy out-of-tree QAT driver allowed for a set of messages not supported + * by this driver; these had a Msg Origin of 0 and are ignored by this driver. + * + * When a PF or VF attempts to send a message in the lower or upper 16 bits, + * respectively, the other 16 bits are written to first with a defined + * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg). + */ + +#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */ + +/* PF->VF messages */ +#define ADF_PF2VF_INT BIT(0) +#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1) +#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C +#define ADF_PF2VF_MSGTYPE_SHIFT 2 +#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01 +#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02 +#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000 +#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000 + +/* PF->VF Version Response */ +#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0 +#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6 +#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000 +#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14 +#define ADF_PF2VF_MINORVERSION_SHIFT 6 +#define ADF_PF2VF_MAJORVERSION_SHIFT 10 +#define ADF_PF2VF_VF_COMPATIBLE 1 +#define ADF_PF2VF_VF_INCOMPATIBLE 2 +#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3 + +/* VF->PF messages */ +#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2 +#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE +#define ADF_VF2PF_INT BIT(16) +#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17) +#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000 +#define ADF_VF2PF_MSGTYPE_SHIFT 18 +#define ADF_VF2PF_MSGTYPE_INIT 0x3 +#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4 +#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5 +#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6 + +/* VF->PF Compatible Version Request */ +#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22 + +/* Collision detection */ +#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10 +#define ADF_IOV_MSG_ACK_DELAY 2 +#define ADF_IOV_MSG_ACK_MAX_RETRY 100 +#define ADF_IOV_MSG_RETRY_DELAY 5 +#define ADF_IOV_MSG_MAX_RETRIES 3 +#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \ + ADF_IOV_MSG_ACK_MAX_RETRY + \ + ADF_IOV_MSG_COLLISION_DETECT_DELAY) +#endif /* ADF_IOV_MSG_H */ diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c new file mode 100644 index 000000000..2f77a4a8c --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -0,0 +1,309 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET	(0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS	96
+
+#define ME2FUNCTION_MAP_B_OFFSET	(0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS	12
+
+#define ME2FUNCTION_MAP_REG_SIZE	4
+#define ME2FUNCTION_MAP_VALID	BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)	\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +	\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)	\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +	\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+	struct work_struct pf2vf_resp_work;
+	struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+	struct adf_pf2vf_resp *pf2vf_resp =
+		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+	kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+	struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+	struct adf_pf2vf_resp *pf2vf_resp;
+
+	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+	if (!pf2vf_resp)
+		return;
+
+	pf2vf_resp->vf_info = vf_info;
+	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	struct adf_accel_vf_info *vf_info;
+	int i;
+	u32 reg;
+
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+	if (!pf2vf_resp_wq)
+		return -ENOMEM;
+
+	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+	     i++, vf_info++) {
+		/* This ptr will be populated when VFs are created */
+		vf_info->accel_dev = accel_dev;
+		vf_info->vf_nr = i;
+
+		tasklet_init(&vf_info->vf2pf_bh_tasklet,
+			     (void *)adf_vf2pf_bh_handler,
+			     (unsigned long)vf_info);
+		mutex_init(&vf_info->pf2vf_lock);
+		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+				     DEFAULT_RATELIMIT_INTERVAL,
+				     DEFAULT_RATELIMIT_BURST);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	/* Enable VF to PF interrupts for all VFs */
+	adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+	/*
+	 * Due to the hardware design, when SR-IOV and the ring arbiter
+	 * are enabled, all the VFs supported in hardware must be enabled in
+	 * order for all the hardware resources (i.e. bundles) to be usable.
+	 * When SR-IOV is enabled, each of the VFs will own one bundle.
+	 */
+	return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables SRIOV for the device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+	struct adf_accel_vf_info *vf;
+	u32 reg;
+	int i;
+
+	if (!accel_dev->pf.vf_info)
+		return;
+
+	adf_pf2vf_notify_restarting(accel_dev);
+
+	pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+	/* Disable VF to PF interrupts */
+	adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+		tasklet_disable(&vf->vf2pf_bh_tasklet);
+		tasklet_kill(&vf->vf2pf_bh_tasklet);
+		mutex_destroy(&vf->pf2vf_lock);
+	}
+
+	kfree(accel_dev->pf.vf_info);
+	accel_dev->pf.vf_info = NULL;
+
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
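+ *
+ * This is intended as the driver's PCI sriov_configure callback, so in
+ * practice VFs are toggled from user space through the standard sysfs
+ * attribute, e.g. (illustrative path):
+ *
+ *	echo 16 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
+ *
+ * with the requested count capped by pci_sriov_get_totalvfs().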
+ */ +int adf_sriov_configure(struct pci_dev *pdev, int numvfs) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + int totalvfs = pci_sriov_get_totalvfs(pdev); + unsigned long val; + int ret; + + if (!accel_dev) { + dev_err(&pdev->dev, "Failed to find accel_dev\n"); + return -EFAULT; + } + + if (!iommu_present(&pci_bus_type)) { + dev_err(&pdev->dev, + "IOMMU must be enabled for SR-IOV to work\n"); + return -EINVAL; + } + + if (accel_dev->pf.vf_info) { + dev_info(&pdev->dev, "Already enabled for this device\n"); + return -EINVAL; + } + + if (adf_dev_started(accel_dev)) { + if (adf_devmgr_in_reset(accel_dev) || + adf_dev_in_use(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Device busy\n"); + return -EBUSY; + } + + if (adf_dev_stop(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Failed to stop qat_dev%d\n", + accel_dev->accel_id); + return -EFAULT; + } + + adf_dev_shutdown(accel_dev); + } + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + return -EFAULT; + val = 0; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + return -EFAULT; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + /* Allocate memory for VF info structs */ + accel_dev->pf.vf_info = kcalloc(totalvfs, + sizeof(struct adf_accel_vf_info), + GFP_KERNEL); + if (!accel_dev->pf.vf_info) + return -ENOMEM; + + if (adf_dev_init(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n", + accel_dev->accel_id); + return -EFAULT; + } + + if (adf_dev_start(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", + accel_dev->accel_id); + return -EFAULT; + } + + ret = adf_enable_sriov(accel_dev); + if (ret) + return ret; + + return numvfs; +} +EXPORT_SYMBOL_GPL(adf_sriov_configure); diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index db2926bff..3865ae8d9 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, dev_err(&GET_DEV(accel_dev), "Can't get ring number\n"); return -EFAULT; } + if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) { + dev_err(&GET_DEV(accel_dev), "Invalid ring number\n"); + return -EFAULT; + } bank = &transport_data->banks[bank_num]; if (adf_reserve_ring(bank, ring_num)) { @@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, goto err; /* Enable HW arbitration for the given ring */ - accel_dev->hw_device->hw_arb_ring_enable(ring); + adf_update_ring_arb(ring); if (adf_ring_debugfs_add(ring, ring_name)) { dev_err(&GET_DEV(accel_dev), @@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, err: adf_cleanup_ring(ring); adf_unreserve_ring(bank, ring_num); - accel_dev->hw_device->hw_arb_ring_disable(ring); + adf_update_ring_arb(ring); return ret; } void adf_remove_ring(struct adf_etr_ring_data *ring) { struct adf_etr_bank_data *bank = ring->bank; - struct adf_accel_dev *accel_dev = bank->accel_dev; /* Disable interrupts for the given ring */ adf_disable_ring_irq(bank, ring->ring_number); @@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring) adf_ring_debugfs_rm(ring); adf_unreserve_ring(bank, ring->ring_number); /* Disable HW arbitration for the given ring */ - accel_dev->hw_device->hw_arb_ring_disable(ring); + adf_update_ring_arb(ring); adf_cleanup_ring(ring); } @@ -463,7 +466,7 @@ err: * acceleration 
device accel_dev. * To be used by QAT device specific drivers. * - * Return: 0 on success, error code othewise. + * Return: 0 on success, error code otherwise. */ int adf_init_etr_data(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 160c9a36c..6ad7e4e1e 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -97,8 +97,9 @@ #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) /* Minimum ring bufer size for memory allocation */ -#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ - ADF_RING_SIZE_4K : SIZE) +#define ADF_RING_SIZE_BYTES_MIN(SIZE) \ + ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \ + ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE) #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) #define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ SIZE) & ~0x4) diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index e41986967..52340b9bb 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -86,9 +86,7 @@ static int adf_ring_show(struct seq_file *sfile, void *v) { struct adf_etr_ring_data *ring = sfile->private; struct adf_etr_bank_data *bank = ring->bank; - uint32_t *msg = v; void __iomem *csr = ring->bank->csr_addr; - int i, x; if (v == SEQ_START_TOKEN) { int head, tail, empty; @@ -113,18 +111,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v) seq_puts(sfile, "----------- Ring data ------------\n"); return 0; } - seq_printf(sfile, "%p:", msg); - x = 0; - i = 0; - for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) { - seq_printf(sfile, " %08X", *(msg + i)); - if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 && - (++x == 8)) { - seq_printf(sfile, "\n%p:", msg + i + 1); - x = 0; - } - } - seq_puts(sfile, "\n"); + seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4, + v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false); return 0; } diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index f1e30e24a..46747f01b 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp { #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h new file mode 100644 index 000000000..0d7a9b51c --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -0,0 +1,112 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef _ICP_QAT_FW_PKE_ +#define _ICP_QAT_FW_PKE_ + +#include "icp_qat_fw.h" + +struct icp_qat_fw_req_hdr_pke_cd_pars { + u64 content_desc_addr; + u32 content_desc_resrvd; + u32 func_id; +}; + +struct icp_qat_fw_req_pke_mid { + u64 opaque; + u64 src_data_addr; + u64 dest_data_addr; +}; + +struct icp_qat_fw_req_pke_hdr { + u8 resrvd1; + u8 resrvd2; + u8 service_type; + u8 hdr_flags; + u16 comn_req_flags; + u16 resrvd4; + struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; +}; + +struct icp_qat_fw_pke_request { + struct icp_qat_fw_req_pke_hdr pke_hdr; + struct icp_qat_fw_req_pke_mid pke_mid; + u8 output_param_count; + u8 input_param_count; + u16 resrvd1; + u32 resrvd2; + u64 next_req_adr; +}; + +struct icp_qat_fw_resp_pke_hdr { + u8 resrvd1; + u8 resrvd2; + u8 response_type; + u8 hdr_flags; + u16 comn_resp_flags; + u16 resrvd4; +}; + +struct icp_qat_fw_pke_resp { + struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; + u64 opaque; + u64 src_data_addr; + u64 dest_data_addr; +}; + +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \ + QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \ + ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \ + QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK) +#endif diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index df427c0e9..2bd913ace 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -53,7 +53,6 @@ #include #include #include -#include #include #include "adf_accel_devices.h" #include "adf_transport.h" @@ -113,9 +112,6 @@ struct qat_alg_aead_ctx { struct crypto_shash *hash_tfm; enum icp_qat_hw_auth_algo qat_hash_alg; struct qat_crypto_instance *inst; - struct crypto_tfm *tfm; - uint8_t salt[AES_BLOCK_SIZE]; - spinlock_t lock; /* protects qat_alg_aead_ctx struct */ }; struct qat_alg_ablkcipher_ctx { @@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx { spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ }; -static int get_current_node(void) -{ - return cpu_data(current_thread_info()->cpu).phys_proc_id; -} - static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) { switch (qat_hash_alg) { @@ -278,12 +269,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) ICP_QAT_FW_LA_NO_UPDATE_STATE); } -static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, +static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, int alg, struct crypto_authenc_keys *keys) { - struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); - unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); + unsigned int digestsize = crypto_aead_authsize(aead_tfm); struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; struct icp_qat_hw_auth_algo_blk *hash = @@ -358,12 +349,12 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, return 0; } -static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, +static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, int alg, struct crypto_authenc_keys *keys) { - struct crypto_aead *aead_tfm = 
__crypto_aead_cast(ctx->tfm); - unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); + unsigned int digestsize = crypto_aead_authsize(aead_tfm); struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; struct icp_qat_hw_cipher_algo_blk *cipher = @@ -515,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg) return 0; } -static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, +static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const uint8_t *key, unsigned int keylen) { struct crypto_authenc_keys keys; int alg; - if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) - return -EFAULT; - if (crypto_authenc_extractkeys(&keys, key, keylen)) goto bad_key; if (qat_alg_validate_key(keys.enckeylen, &alg)) goto bad_key; - if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) + if (qat_alg_aead_init_enc_session(tfm, alg, &keys)) goto error; - if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) + if (qat_alg_aead_init_dec_session(tfm, alg, &keys)) goto error; return 0; bad_key: - crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; error: return -EFAULT; @@ -567,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev; - spin_lock(&ctx->lock); if (ctx->enc_cd) { /* rekeying */ dev = &GET_DEV(ctx->inst->accel_dev); @@ -581,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, struct qat_crypto_instance *inst = qat_crypto_get_instance_node(node); if (!inst) { - spin_unlock(&ctx->lock); return -EINVAL; } @@ -591,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, &ctx->enc_cd_paddr, GFP_ATOMIC); if (!ctx->enc_cd) { - spin_unlock(&ctx->lock); return -ENOMEM; } ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), &ctx->dec_cd_paddr, GFP_ATOMIC); if (!ctx->dec_cd) { - spin_unlock(&ctx->lock); goto out_free_enc; } } - spin_unlock(&ctx->lock); - if (qat_alg_aead_init_sessions(ctx, key, keylen)) + if (qat_alg_aead_init_sessions(tfm, key, keylen)) goto out_free_all; return 0; @@ -654,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, } static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct scatterlist *assoc, int assoclen, struct scatterlist *sgl, - struct scatterlist *sglout, uint8_t *iv, - uint8_t ivlen, + struct scatterlist *sglout, struct qat_crypto_request *qat_req) { struct device *dev = &GET_DEV(inst->accel_dev); - int i, bufs = 0, sg_nctr = 0; - int n = sg_nents(sgl), assoc_n = sg_nents(assoc); + int i, sg_nctr = 0; + int n = sg_nents(sgl); struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; dma_addr_t blp; dma_addr_t bloutp = 0; struct scatterlist *sg; size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + - ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); + ((1 + n) * sizeof(struct qat_alg_buf)); if (unlikely(!n)) return -EINVAL; @@ -683,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(dma_mapping_error(dev, blp))) goto err; - for_each_sg(assoc, sg, assoc_n, i) { - if (!sg->length) - continue; - - if (!(assoclen > 0)) - break; - - bufl->bufers[bufs].addr = - dma_map_single(dev, sg_virt(sg), - min_t(int, assoclen, sg->length), - DMA_BIDIRECTIONAL); - 
bufl->bufers[bufs].len = min_t(int, assoclen, sg->length); - if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) - goto err; - bufs++; - assoclen -= sg->length; - } - - if (ivlen) { - bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, - DMA_BIDIRECTIONAL); - bufl->bufers[bufs].len = ivlen; - if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) - goto err; - bufs++; - } - for_each_sg(sgl, sg, n, i) { - int y = sg_nctr + bufs; + int y = sg_nctr; if (!sg->length) continue; @@ -724,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, goto err; sg_nctr++; } - bufl->num_bufs = sg_nctr + bufs; + bufl->num_bufs = sg_nctr; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -734,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, n = sg_nents(sglout); sz_out = sizeof(struct qat_alg_buf_list) + - ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); + ((1 + n) * sizeof(struct qat_alg_buf)); sg_nctr = 0; buflout = kzalloc_node(sz_out, GFP_ATOMIC, dev_to_node(&GET_DEV(inst->accel_dev))); @@ -744,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(dma_mapping_error(dev, bloutp))) goto err; bufers = buflout->bufers; - /* For out of place operation dma map only data and - * reuse assoc mapping and iv */ - for (i = 0; i < bufs; i++) { - bufers[i].len = bufl->bufers[i].len; - bufers[i].addr = bufl->bufers[i].addr; - } for_each_sg(sglout, sg, n, i) { - int y = sg_nctr + bufs; + int y = sg_nctr; if (!sg->length) continue; @@ -764,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufers[y].len = sg->length; sg_nctr++; } - buflout->num_bufs = sg_nctr + bufs; + buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; @@ -778,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, err: dev_err(dev, "Failed to map buf for dma\n"); sg_nctr = 0; - for (i = 0; i < n + bufs; i++) + for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) dma_unmap_single(dev, bufl->bufers[i].addr, bufl->bufers[i].len, @@ -789,7 +737,7 @@ err: kfree(bufl); if (sgl != sglout && buflout) { n = sg_nents(sglout); - for (i = bufs; i < n + bufs; i++) + for (i = 0; i < n; i++) if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, @@ -849,12 +797,10 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; - int digst_size = crypto_aead_crt(aead_tfm)->authsize; + int digst_size = crypto_aead_authsize(aead_tfm); int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, - areq->src, areq->dst, areq->iv, - AES_BLOCK_SIZE, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) return ret; @@ -868,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; cipher_param->cipher_length = areq->cryptlen - digst_size; - cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; + cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 
0; - auth_param->auth_len = areq->assoclen + - cipher_param->cipher_length + AES_BLOCK_SIZE; + auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; do { ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); } while (ret == -EAGAIN && ctr++ < 10); @@ -885,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) return -EINPROGRESS; } -static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, - int enc_iv) +static int qat_alg_aead_enc(struct aead_request *areq) { struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); @@ -895,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; + uint8_t *iv = areq->iv; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, - areq->src, areq->dst, iv, AES_BLOCK_SIZE, - qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) return ret; @@ -914,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, cipher_param = (void *)&qat_req->req.serv_specif_rqpars; auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); - if (enc_iv) { - cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; - cipher_param->cipher_offset = areq->assoclen; - } else { - memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); - cipher_param->cipher_length = areq->cryptlen; - cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; - } + memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); + cipher_param->cipher_length = areq->cryptlen; + cipher_param->cipher_offset = areq->assoclen; + auth_param->auth_off = 0; - auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; + auth_param->auth_len = areq->assoclen + areq->cryptlen; do { ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); @@ -936,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, return -EINPROGRESS; } -static int qat_alg_aead_enc(struct aead_request *areq) -{ - return qat_alg_aead_enc_internal(areq, areq->iv, 0); -} - -static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req) -{ - struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); - struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); - struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); - __be64 seq; - - memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); - seq = cpu_to_be64(req->seq); - memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), - &seq, sizeof(uint64_t)); - return qat_alg_aead_enc_internal(&req->areq, req->giv, 1); -} - static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const uint8_t *key, unsigned int keylen) @@ -1026,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, - NULL, 0, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); if (unlikely(ret)) return ret; @@ -1064,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, - NULL, 0, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); if 
(unlikely(ret)) return ret; @@ -1092,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) return -EINPROGRESS; } -static int qat_alg_aead_init(struct crypto_tfm *tfm, +static int qat_alg_aead_init(struct crypto_aead *tfm, enum icp_qat_hw_auth_algo hash, const char *hash_name) { - struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(ctx->hash_tfm)) - return -EFAULT; - spin_lock_init(&ctx->lock); + return PTR_ERR(ctx->hash_tfm); ctx->qat_hash_alg = hash; - crypto_aead_set_reqsize(__crypto_aead_cast(tfm), - sizeof(struct aead_request) + - sizeof(struct qat_crypto_request)); - ctx->tfm = tfm; + crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + + sizeof(struct qat_crypto_request)); return 0; } -static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm) +static int qat_alg_aead_sha1_init(struct crypto_aead *tfm) { return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); } -static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm) +static int qat_alg_aead_sha256_init(struct crypto_aead *tfm) { return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); } -static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm) +static int qat_alg_aead_sha512_init(struct crypto_aead *tfm) { return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); } -static void qat_alg_aead_exit(struct crypto_tfm *tfm) +static void qat_alg_aead_exit(struct crypto_aead *tfm) { - struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); + struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev; - if (!IS_ERR(ctx->hash_tfm)) - crypto_free_shash(ctx->hash_tfm); + crypto_free_shash(ctx->hash_tfm); if (!inst) return; @@ -1189,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) qat_crypto_put_instance(inst); } -static struct crypto_alg qat_algs[] = { { - .cra_name = "authenc(hmac(sha1),cbc(aes))", - .cra_driver_name = "qat_aes_cbc_hmac_sha1", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = qat_alg_aead_sha1_init, - .cra_exit = qat_alg_aead_exit, - .cra_u = { - .aead = { - .setkey = qat_alg_aead_setkey, - .decrypt = qat_alg_aead_dec, - .encrypt = qat_alg_aead_enc, - .givencrypt = qat_alg_aead_genivenc, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - }, + +static struct aead_alg qat_aeads[] = { { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha1", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_module = THIS_MODULE, }, + .init = qat_alg_aead_sha1_init, + .exit = qat_alg_aead_exit, + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, }, { - .cra_name = "authenc(hmac(sha256),cbc(aes))", - .cra_driver_name = "qat_aes_cbc_hmac_sha256", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = 
qat_alg_aead_sha256_init, - .cra_exit = qat_alg_aead_exit, - .cra_u = { - .aead = { - .setkey = qat_alg_aead_setkey, - .decrypt = qat_alg_aead_dec, - .encrypt = qat_alg_aead_enc, - .givencrypt = qat_alg_aead_genivenc, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - }, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha256", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_module = THIS_MODULE, }, + .init = qat_alg_aead_sha256_init, + .exit = qat_alg_aead_exit, + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, }, { - .cra_name = "authenc(hmac(sha512),cbc(aes))", - .cra_driver_name = "qat_aes_cbc_hmac_sha512", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = qat_alg_aead_sha512_init, - .cra_exit = qat_alg_aead_exit, - .cra_u = { - .aead = { - .setkey = qat_alg_aead_setkey, - .decrypt = qat_alg_aead_dec, - .encrypt = qat_alg_aead_enc, - .givencrypt = qat_alg_aead_genivenc, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA512_DIGEST_SIZE, - }, + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "qat_aes_cbc_hmac_sha512", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), + .cra_module = THIS_MODULE, }, -}, { + .init = qat_alg_aead_sha512_init, + .exit = qat_alg_aead_exit, + .setkey = qat_alg_aead_setkey, + .decrypt = qat_alg_aead_dec, + .encrypt = qat_alg_aead_enc, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, +} }; + +static struct crypto_alg qat_algs[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "qat_aes_cbc", .cra_priority = 4001, @@ -1281,42 +1183,54 @@ static struct crypto_alg qat_algs[] = { { int qat_algs_register(void) { - int ret = 0; + int ret = 0, i; mutex_lock(&algs_lock); - if (++active_devs == 1) { - int i; + if (++active_devs != 1) + goto unlock; - for (i = 0; i < ARRAY_SIZE(qat_algs); i++) - qat_algs[i].cra_flags = - (qat_algs[i].cra_type == &crypto_aead_type) ? 
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : - CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; + for (i = 0; i < ARRAY_SIZE(qat_algs); i++) + qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; - ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); - } + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + if (ret) + goto unlock; + + for (i = 0; i < ARRAY_SIZE(qat_aeads); i++) + qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC; + + ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); + if (ret) + goto unreg_algs; + +unlock: mutex_unlock(&algs_lock); return ret; + +unreg_algs: + crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + goto unlock; } int qat_algs_unregister(void) { - int ret = 0; - mutex_lock(&algs_lock); - if (--active_devs == 0) - ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + if (--active_devs != 0) + goto unlock; + + crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); + crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + +unlock: mutex_unlock(&algs_lock); - return ret; + return 0; } int qat_algs_init(void) { - crypto_get_default_rng(); return 0; } void qat_algs_exit(void) { - crypto_put_default_rng(); } diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c new file mode 100644 index 000000000..e87f51023 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -0,0 +1,652 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include "qat_rsakey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+	union {
+		struct {
+			dma_addr_t m;
+			dma_addr_t e;
+			dma_addr_t n;
+		} enc;
+		struct {
+			dma_addr_t c;
+			dma_addr_t d;
+			dma_addr_t n;
+		} dec;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+	union {
+		struct {
+			dma_addr_t c;
+		} enc;
+		struct {
+			dma_addr_t m;
+		} dec;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+	char *n;
+	char *e;
+	char *d;
+	dma_addr_t dma_n;
+	dma_addr_t dma_e;
+	dma_addr_t dma_d;
+	unsigned int key_sz;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+	struct qat_rsa_input_params in;
+	struct qat_rsa_output_params out;
+	dma_addr_t phy_in;
+	dma_addr_t phy_out;
+	char *src_align;
+	struct icp_qat_fw_pke_request req;
+	struct qat_rsa_ctx *ctx;
+	int err;
+} __aligned(64);
+
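The two parameter blocks above are deliberately dual-view: the driver fills them through the named enc/dec members, while the firmware consumes the same memory as a flat table of DMA addresses. A stand-alone illustration of that union aliasing, in plain C with uint64_t standing in for dma_addr_t and the driver's __packed/__aligned(64) annotations omitted:

#include <stdint.h>
#include <stdio.h>

/* Same trick as qat_rsa_input_params: a named view over one flat table. */
struct pke_in_params {
	union {
		struct {
			uint64_t m;	/* message */
			uint64_t e;	/* public exponent */
			uint64_t n;	/* modulus */
		} enc;
		uint64_t in_tab[8];	/* flat view handed to firmware */
	};
};

int main(void)
{
	struct pke_in_params p = { 0 };	/* zero-init the whole table */
	int i;

	/* Fill by role... */
	p.enc.m = 0x1000;
	p.enc.e = 0x2000;
	p.enc.n = 0x3000;
	p.in_tab[3] = 0;	/* terminator slot, as qat_rsa_enc() writes it */

	/* ...then consume as the flat firmware-facing table. */
	for (i = 0; i < 8; i++)
		printf("in_tab[%d] = 0x%llx\n", i,
		       (unsigned long long)p.in_tab[i]);
	return 0;
}

Presumably because the request context is reused between requests, qat_rsa_enc() below still explicitly clears in_tab[3] and out_tab[1] to terminate its three-input/one-output tables.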
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp->pke_resp_hdr.comn_resp_flags);
+	char *ptr = areq->dst;
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (req->src_align)
+		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+				  req->in.enc.m);
+	else
+		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+				 DMA_TO_DEVICE);
+
+	dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_rsa_output_params),
+			 DMA_TO_DEVICE);
+
+	areq->dst_len = req->ctx->key_sz;
+	/* Need to set the correct length of the output */
+	while (!(*ptr) && areq->dst_len) {
+		areq->dst_len--;
+		ptr++;
+	}
+
+	if (areq->dst_len != req->ctx->key_sz)
+		memmove(areq->dst, ptr, areq->dst_len);
+
+	akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+	struct icp_qat_fw_pke_resp *resp = _resp;
+
+	qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_EP_512;
+	case 1024:
+		return PKE_RSA_EP_1024;
+	case 1536:
+		return PKE_RSA_EP_1536;
+	case 2048:
+		return PKE_RSA_EP_2048;
+	case 3072:
+		return PKE_RSA_EP_3072;
+	case 4096:
+		return PKE_RSA_EP_4096;
+	default:
+		return 0;
+	};
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP1_512;
+	case 1024:
+		return PKE_RSA_DP1_1024;
+	case 1536:
+		return PKE_RSA_DP1_1536;
+	case 2048:
+		return PKE_RSA_DP1_2048;
+	case 3072:
+		return PKE_RSA_DP1_3072;
+	case 4096:
+		return PKE_RSA_DP1_4096;
+	default:
+		return 0;
+	};
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_rsa_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->e))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->ctx = ctx;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.enc.e = ctx->dma_e;
+	qat_req->in.enc.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * The src buffer can be any size within the valid range, but the HW
+	 * expects it to be the same size as the modulus n, so if the sizes
+	 * differ we allocate a new buffer and copy the src data into it.
+	 * Otherwise we simply map the user-provided buffer.
+	 */
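The comment above describes what is, in isolation, just right-alignment of a big-endian integer inside a zeroed buffer of modulus size. A minimal plain-C sketch of that step (hypothetical helper name, calloc() standing in for the driver's dma_zalloc_coherent()):

#include <stdlib.h>
#include <string.h>

/*
 * Right-align a big-endian integer of src_len bytes inside a zeroed
 * key_sz-byte buffer, as qat_rsa_enc() does before handing it to the HW.
 */
static unsigned char *pad_to_modulus(const unsigned char *src, size_t src_len,
				     size_t key_sz)
{
	unsigned char *buf;

	if (src_len > key_sz)
		return NULL;		/* oversized input: caller rejects */

	buf = calloc(1, key_sz);	/* zeroed, like dma_zalloc_coherent() */
	if (!buf)
		return NULL;

	/* shift == key_sz - src_len, exactly as in the driver */
	memcpy(buf + (key_sz - src_len), src, src_len);
	return buf;
}

In the driver the temporary buffer lives until qat_rsa_cb() runs, which releases it with dma_free_coherent() once the firmware response arrives.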
+	if (req->src_len < ctx->key_sz) {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.enc.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		memcpy(qat_req->src_align + shift, req->src, req->src_len);
+	} else {
+		qat_req->src_align = NULL;
+		qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
+						   DMA_TO_DEVICE);
+	}
+	qat_req->in.in_tab[3] = 0;
+	qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
+					    DMA_FROM_DEVICE);
+	qat_req->out.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+
+	if (unlikely((!qat_req->src_align &&
+		      dma_mapping_error(dev, qat_req->in.enc.m)) ||
+		     dma_mapping_error(dev, qat_req->out.enc.c) ||
+		     dma_mapping_error(dev, qat_req->phy_in) ||
+		     dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+unmap:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.enc.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.enc.m))
+			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+					 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.enc.c))
+		dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+				 DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_rsa_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->d))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->ctx = ctx;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.dec.d = ctx->dma_d;
+	qat_req->in.dec.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * The src buffer can be any size within the valid range, but the HW
+	 * expects it to be the same size as the modulus n, so if the sizes
+	 * differ we allocate a new buffer and copy the src data into it.
+	 * Otherwise we simply map the user-provided buffer.
+	 */
+	if (req->src_len < ctx->key_sz) {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.dec.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		memcpy(qat_req->src_align + shift, req->src, req->src_len);
+	} else {
+		qat_req->src_align = NULL;
+		qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
+						   DMA_TO_DEVICE);
+	}
+	qat_req->in.in_tab[3] = 0;
+	qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
+					    DMA_FROM_DEVICE);
+	qat_req->out.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+
+	if (unlikely((!qat_req->src_align &&
+		      dma_mapping_error(dev, qat_req->in.dec.c)) ||
+		     dma_mapping_error(dev, qat_req->out.dec.m) ||
+		     dma_mapping_error(dev, qat_req->phy_in) ||
+		     dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+unmap:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.dec.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.dec.c))
+			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+					 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.dec.m))
+		dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+				 DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+	return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ctx->key_sz = vlen;
+	ret = -EINVAL;
+	/* In FIPS mode only allow key size 2K & 3K */
+	if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
+		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+		goto err;
+	}
+	/* invalid key size provided */
+	if (!qat_rsa_enc_fn_id(ctx->key_sz))
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	if (!ctx->n)
+		goto err;
+
+	memcpy(ctx->n, ptr, ctx->key_sz);
+	return 0;
+err:
+	ctx->key_sz = 0;
+	ctx->n = NULL;
+	return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+		ctx->e = NULL;
+		return -EINVAL;
+	}
+
+	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	if (!ctx->e) {
+		ctx->e = NULL;
+		return -ENOMEM;
+	}
+	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+}
+
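For context, asn1_ber_decoder() is what drives these callbacks: the decoder table generated from qat_rsakey.asn1 (added further down in this patch) invokes one of the qat_rsa_get_* actions per INTEGER in the RsaKey SEQUENCE. A minimal sketch of an action with the same signature, using a hypothetical demo_key context rather than the driver's qat_rsa_ctx:

#include <stddef.h>

/* Hypothetical context; the driver passes a struct qat_rsa_ctx instead. */
struct demo_key {
	const unsigned char *n;
	size_t n_len;
};

/* Same asn1_action_t shape: (context, hdrlen, tag, value, vlen). */
static int demo_get_n(void *context, size_t hdrlen, unsigned char tag,
		      const void *value, size_t vlen)
{
	struct demo_key *key = context;
	const unsigned char *ptr = value;

	/* Skip the leading zero octets BER adds for the sign bit, as
	 * qat_rsa_get_n() does before taking vlen as the key size. */
	while (vlen && !*ptr) {
		ptr++;
		vlen--;
	}
	key->n = ptr;
	key->n_len = vlen;
	return vlen ? 0 : -1;	/* reject an all-zero modulus */
}

With the parsing pushed into these actions, qat_rsa_setkey() below only has to call asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen) and then check that n and e were actually populated.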
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ret = -EINVAL;
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+		goto err;
+
+	/* In FIPS mode only allow key size 2K & 3K */
+	if (fips_enabled && (vlen != 256 && vlen != 384)) {
+		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+		goto err;
+	}
+
+	ret = -ENOMEM;
+	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	if (!ctx->d)
+		goto err;
+
+	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+err:
+	ctx->d = NULL;
+	return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	int ret;
+
+	/* Free the old key if any */
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+	ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+	if (ret < 0)
+		goto free;
+
+	if (!ctx->n || !ctx->e) {
+		/* invalid key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+
+	return 0;
+free:
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+		ctx->d = NULL;
+	}
+	if (ctx->e) {
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+		ctx->e = NULL;
+	}
+	if (ctx->n) {
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+		ctx->n = NULL;
+		ctx->key_sz = 0;
+	}
+	return ret;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->key_sz = 0;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	qat_crypto_put_instance(ctx->inst);
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+	.encrypt = qat_rsa_enc,
+	.decrypt = qat_rsa_dec,
+	.sign = qat_rsa_dec,
+	.verify = qat_rsa_enc,
+	.setkey = qat_rsa_setkey,
+	.init = qat_rsa_init_tfm,
+	.exit = qat_rsa_exit_tfm,
+	.reqsize = sizeof(struct qat_rsa_request) + 64,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "qat-rsa",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
+	},
+};
+
+int qat_asym_algs_register(void)
+{
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs == 1) {
+		rsa.base.cra_flags = 0;
+		ret = crypto_register_akcipher(&rsa);
+	}
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if
(--active_devs == 0) + crypto_unregister_akcipher(&rsa); + mutex_unlock(&algs_lock); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 3bd705ca5..07c2f9f9d 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) if (inst->pke_rx) adf_remove_ring(inst->pke_rx); - if (inst->rnd_tx) - adf_remove_ring(inst->rnd_tx); - - if (inst->rnd_rx) - adf_remove_ring(inst->rnd_rx); - list_del(list_ptr); kfree(inst); } @@ -109,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) list_for_each(itr, adf_devmgr_get_head()) { accel_dev = list_entry(itr, struct adf_accel_dev, list); + if ((node == dev_to_node(&GET_DEV(accel_dev)) || dev_to_node(&GET_DEV(accel_dev)) < 0) && - adf_dev_started(accel_dev)) + adf_dev_started(accel_dev) && + !list_empty(&accel_dev->crypto_list)) break; accel_dev = NULL; } @@ -158,7 +154,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) INIT_LIST_HEAD(&accel_dev->crypto_list); strlcpy(key, ADF_NUM_CY, sizeof(key)); - if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) return -EFAULT; @@ -187,7 +182,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) if (kstrtoul(val, 10, &num_msg_sym)) goto err; + num_msg_sym = num_msg_sym >> 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) goto err; @@ -202,11 +199,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) msg_size, key, NULL, 0, &inst->sym_tx)) goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, NULL, 0, &inst->rnd_tx)) - goto err; - msg_size = msg_size >> 1; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, @@ -220,15 +212,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) &inst->sym_rx)) goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, qat_alg_callback, 0, - &inst->rnd_rx)) - goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, qat_alg_callback, 0, + msg_size, key, qat_alg_asym_callback, 0, &inst->pke_rx)) goto err; } diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index d503007b4..dc0273fe3 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -57,8 +57,6 @@ struct qat_crypto_instance { struct adf_etr_ring_data *sym_rx; struct adf_etr_ring_data *pke_tx; struct adf_etr_ring_data *pke_rx; - struct adf_etr_ring_data *rnd_tx; - struct adf_etr_ring_data *rnd_rx; struct adf_accel_dev *accel_dev; struct list_head list; unsigned long state; diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 274ff7e9d..8e711d1c3 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) #define LOCAL_TO_XFER_REG_OFFSET 0x800 #define ICP_DH895XCC_EP_OFFSET 0x3a000 -#define 
ICP_DH895XCC_PMISC_BAR 1 int qat_hal_init(struct adf_accel_dev *accel_dev) { unsigned char ae; @@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) struct icp_qat_fw_loader_handle *handle; struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - struct adf_bar *bar = + struct adf_bar *misc_bar = &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; + struct adf_bar *sram_bar = + &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) return -ENOMEM; - handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + + handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + ICP_DH895XCC_CAP_OFFSET; - handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + + handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + ICP_DH895XCC_AE_OFFSET; - handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; + handle->hal_ep_csr_addr_v = misc_bar->virt_addr + + ICP_DH895XCC_EP_OFFSET; handle->hal_cap_ae_local_csr_addr_v = handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; - + handle->hal_sram_addr_v = sram_bar->virt_addr; handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); if (!handle->hal_handle) goto out_hal_handle; diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 new file mode 100644 index 000000000..97b0e02b6 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 @@ -0,0 +1,5 @@ +RsaKey ::= SEQUENCE { + n INTEGER ({ qat_rsa_get_n }), + e INTEGER ({ qat_rsa_get_e }), + d INTEGER ({ qat_rsa_get_d }) +} diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 1e27f9f7f..c48f181e8 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { - unsigned int i; - struct icp_qat_uof_memvar_attr *mem_val_attr; - - mem_val_attr = - (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + - sizeof(struct icp_qat_uof_initmem)); - switch (init_mem->region) { - case ICP_QAT_UOF_SRAM_REGION: - if ((init_mem->addr + init_mem->num_in_bytes) > - ICP_DH895XCC_PESRAM_BAR_SIZE) { - pr_err("QAT: initmem on SRAM is out of range"); - return -EINVAL; - } - for (i = 0; i < init_mem->val_attr_num; i++) { - qat_uclo_wr_sram_by_words(handle, - init_mem->addr + - mem_val_attr->offset_in_byte, - &mem_val_attr->value, 4); - mem_val_attr++; - } - break; case ICP_QAT_UOF_LMEM_REGION: if (qat_uclo_init_lmem_seg(handle, init_mem)) return -EINVAL; @@ -990,6 +969,12 @@ out_err: return -EFAULT; } +void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); +} + int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, int mem_size) { diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 25171c557..8c79c5437 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-objs := adf_drv.o \ adf_isr.o \ - adf_dh895xcc_hw_data.o \ - adf_hw_arbiter.o \ - qat_admin.o \ - 
adf_admin.o + adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c deleted file mode 100644 index e4666065c..000000000 --- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ -#include -#include -#include -#include -#include -#include -#include -#include "adf_drv.h" -#include "adf_dh895xcc_hw_data.h" - -#define ADF_ADMINMSG_LEN 32 - -struct adf_admin_comms { - dma_addr_t phy_addr; - void *virt_addr; - void __iomem *mailbox_addr; - struct mutex lock; /* protects adf_admin_comms struct */ -}; - -int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, - uint32_t ae, void *in, void *out) -{ - struct adf_admin_comms *admin = accel_dev->admin; - int offset = ae * ADF_ADMINMSG_LEN * 2; - void __iomem *mailbox = admin->mailbox_addr; - int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; - int times, received; - - mutex_lock(&admin->lock); - - if (ADF_CSR_RD(mailbox, mb_offset) == 1) { - mutex_unlock(&admin->lock); - return -EAGAIN; - } - - memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); - ADF_CSR_WR(mailbox, mb_offset, 1); - received = 0; - for (times = 0; times < 50; times++) { - msleep(20); - if (ADF_CSR_RD(mailbox, mb_offset) == 0) { - received = 1; - break; - } - } - if (received) - memcpy(out, admin->virt_addr + offset + - ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); - else - dev_err(&GET_DEV(accel_dev), - "Failed to send admin msg to accelerator\n"); - - mutex_unlock(&admin->lock); - return received ? 0 : -EFAULT; -} - -int adf_init_admin_comms(struct adf_accel_dev *accel_dev) -{ - struct adf_admin_comms *admin; - struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; - void __iomem *csr = pmisc->virt_addr; - void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; - uint64_t reg_val; - - admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, - dev_to_node(&GET_DEV(accel_dev))); - if (!admin) - return -ENOMEM; - admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, - &admin->phy_addr, GFP_KERNEL); - if (!admin->virt_addr) { - dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); - kfree(admin); - return -ENOMEM; - } - reg_val = (uint64_t)admin->phy_addr; - ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); - ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); - mutex_init(&admin->lock); - admin->mailbox_addr = mailbox; - accel_dev->admin = admin; - return 0; -} - -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) -{ - struct adf_admin_comms *admin = accel_dev->admin; - - if (!admin) - return; - - if (admin->virt_addr) - dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, - admin->virt_addr, admin->phy_addr); - - mutex_destroy(&admin->lock); - kfree(admin); - accel_dev->admin = NULL; -} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index b1386922d..ff54257ec 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -45,8 +45,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
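For reference, the admin-mailbox transaction that the deleted adf_put_admin_msg_sync() above implemented reduces to a write-then-poll handshake: raise the mailbox flag to post a request, then poll until firmware clears it or the retry budget runs out. A simplified user-space sketch under that reading, with a bare volatile pointer standing in for the ADF_CSR_RD/ADF_CSR_WR MMIO accessors and the 32-byte message payload left out:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static void sleep_ms(unsigned int ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

static bool mailbox_send_sync(volatile uint32_t *mailbox)
{
	int tries;

	if (*mailbox == 1)
		return false;		/* firmware busy: -EAGAIN in the driver */

	*mailbox = 1;			/* post the request */
	for (tries = 0; tries < 50; tries++) {	/* 50 x 20 ms, as above */
		sleep_ms(20);
		if (*mailbox == 0)
			return true;	/* firmware consumed the message */
	}
	return false;			/* timed out: -EFAULT in the driver */
}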
*/ #include +#include +#include #include "adf_dh895xcc_hw_data.h" -#include "adf_common_drv.h" #include "adf_drv.h" /* Worker thread to service arbiter mappings based on dev SKUs */ @@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) return ADF_DH895XCC_ETR_BAR; } +static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCC_SRAM_BAR; +} + static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) @@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, } } +static uint32_t get_pf2vf_offset(uint32_t i) +{ + return ADF_DH895XCC_PF2VF_OFFSET(i); +} + +static uint32_t get_vintmsk_offset(uint32_t i) +{ + return ADF_DH895XCC_VINTMSK_OFFSET(i); +} + static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_device = accel_dev->hw_device; @@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) /* Enable bundle and misc interrupts */ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, - ADF_DH895XCC_SMIA0_MASK); + accel_dev->pf.vf_info ? 0 : + GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0)); ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, ADF_DH895XCC_SMIA1_MASK); } +static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) +{ + return 0; +} + void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &dh895xcc_class; hw_data->instance_id = dh895xcc_class.instances++; hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; - hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; hw_data->num_logical_accel = 1; hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; @@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; hw_data->enable_error_correction = adf_enable_error_correction; - hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; - hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; hw_data->get_num_aes = get_num_aes; hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_sku = get_sku; hw_data->fw_name = ADF_DH895XCC_FW; + hw_data->fw_mmp_name = ADF_DH895XCC_MMP; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->disable_iov = adf_disable_sriov; + hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; } void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index d5cb7beaa..cb7a0b3c2 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -48,6 +48,7 @@ #define 
ADF_DH895x_HW_DATA_H_ /* PCIe configuration space */ +#define ADF_DH895XCC_SRAM_BAR 0 #define ADF_DH895XCC_PMISC_BAR 1 #define ADF_DH895XCC_ETR_BAR 2 #define ADF_DH895XCC_RX_RINGS_OFFSET 8 @@ -79,10 +80,11 @@ #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) -/* Admin Messages Registers */ -#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) -#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) -#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 -#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 +#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) +#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) +#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) +#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) +/* FW names */ #define ADF_DH895XCC_FW "/*(DEBLOBBED)*/" +#define ADF_DH895XCC_MMP "/*(DEBLOBBED)*/" #endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index db4e3e348..5106c21af 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -82,16 +82,21 @@ static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = adf_driver_name, .probe = adf_probe, - .remove = adf_remove + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, }; +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; int i; - adf_dev_shutdown(accel_dev); - for (i = 0; i < ADF_PCI_MAX_BARS; i++) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; @@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) } if (accel_dev->hw_device) { - switch (accel_dev->hw_device->pci_dev_id) { + switch (accel_pci_dev->pci_dev->device) { case ADF_DH895XCC_PCI_DEVICE_ID: adf_clean_hw_data_dh895xcc(accel_dev->hw_device); break; @@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) break; } kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; } adf_cfg_dev_remove(accel_dev); debugfs_remove(accel_dev->debugfs_dir); - adf_devmgr_rm_dev(accel_dev); - pci_release_regions(accel_pci_dev->pci_dev); - pci_disable_device(accel_pci_dev->pci_dev); - kfree(accel_dev); + adf_devmgr_rm_dev(accel_dev, NULL); } static int adf_dev_configure(struct adf_accel_dev *accel_dev) @@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) key, (void *)&val, ADF_DEC)) goto err; - val = 4; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - val = 8; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, @@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) key, (void *)&val, ADF_DEC)) goto err; - val = 12; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - val = ADF_COALESCING_DEF_TIME; snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", @@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct 
adf_hw_device_data *hw_data; char name[ADF_DEVICE_NAME_LENGTH]; unsigned int i, bar_nr; - int ret; + int ret, bar_mask; switch (ent->device) { case ADF_DH895XCC_PCI_DEVICE_ID: @@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; /* Add accel device to accel table. * This should be called before adf_cleanup_accel is called */ - if (adf_devmgr_add_dev(accel_dev)) { + if (adf_devmgr_add_dev(accel_dev, NULL)) { dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); kfree(accel_dev); return -EFAULT; @@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) default: return -ENODEV; } - accel_pci_dev = &accel_dev->accel_pci_dev; pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, &hw_data->fuses); @@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); accel_pci_dev->sku = hw_data->get_sku(hw_data); - accel_pci_dev->pci_dev = pdev; /* If the device has no acceleration engines then ignore it. */ if (!hw_data->accel_mask || !hw_data->ae_mask || ((~hw_data->ae_mask) & 0x01)) { @@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Create dev top level debugfs entry */ - snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, - hw_data->dev_class->name, hw_data->instance_id); + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir\n"); + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); ret = -EINVAL; goto out_err; } @@ -313,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { dev_err(&pdev->dev, "No usable DMA configuration\n"); ret = -EFAULT; - goto out_err; + goto out_err_disable; } else { pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } @@ -324,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_request_regions(pdev, adf_driver_name)) { ret = -EFAULT; - goto out_err; + goto out_err_disable; } /* Read accelerator capabilities mask */ @@ -332,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) &hw_data->accel_capabilities_mask); /* Find and map all the device's BARS */ - for (i = 0; i < ADF_PCI_MAX_BARS; i++) { - struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; - bar_nr = i * 2; bar->base_addr = pci_resource_start(pdev, bar_nr); if (!bar->base_addr) break; bar->size = pci_resource_len(pdev, bar_nr); bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); if (!bar->virt_addr) { - dev_err(&pdev->dev, "Failed to map BAR %d\n", i); + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); ret = -EFAULT; - goto out_err; + goto out_err_free_reg; } 
} pci_set_master(pdev); @@ -352,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (adf_enable_aer(accel_dev, &adf_driver)) { dev_err(&pdev->dev, "Failed to enable aer\n"); ret = -EFAULT; - goto out_err; + goto out_err_free_reg; } if (pci_save_state(pdev)) { dev_err(&pdev->dev, "Failed to save pci state\n"); ret = -ENOMEM; - goto out_err; + goto out_err_free_reg; } ret = adf_dev_configure(accel_dev); if (ret) - goto out_err; + goto out_err_free_reg; ret = adf_dev_init(accel_dev); if (ret) - goto out_err; + goto out_err_dev_shutdown; ret = adf_dev_start(accel_dev); - if (ret) { - adf_dev_stop(accel_dev); - goto out_err; - } + if (ret) + goto out_err_dev_stop; - return 0; + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); out_err: adf_cleanup_accel(accel_dev); + kfree(accel_dev); return ret; } @@ -391,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev) } if (adf_dev_stop(accel_dev)) dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); adf_disable_aer(accel_dev); adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); } static int __init adfdrv_init(void) { request_module("intel_qat"); - if (qat_admin_register()) - return -EFAULT; if (pci_register_driver(&adf_driver)) { pr_err("QAT: Driver initialization failed\n"); @@ -411,7 +417,6 @@ static int __init adfdrv_init(void) static void __exit adfdrv_release(void) { pci_unregister_driver(&adf_driver); - qat_admin_unregister(); } module_init(adfdrv_init); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h index a2fbb6ce7..85ff245bd 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h @@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); void adf_isr_resource_free(struct adf_accel_dev *accel_dev); -void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, uint32_t const **arb_map_config); -int adf_init_admin_comms(struct adf_accel_dev *accel_dev); -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); -int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, - uint32_t ae, void *in, void *out); -int qat_admin_register(void); -int qat_admin_unregister(void); -int adf_init_arb(struct adf_accel_dev *accel_dev); -void adf_exit_arb(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c deleted file mode 100644 index 1864bdb36..000000000 --- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. 
- - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ -#include -#include -#include "adf_drv.h" - -#define ADF_ARB_NUM 4 -#define ADF_ARB_REQ_RING_NUM 8 -#define ADF_ARB_REG_SIZE 0x4 -#define ADF_ARB_WTR_SIZE 0x20 -#define ADF_ARB_OFFSET 0x30000 -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_WTR_OFFSET 0x010 -#define ADF_ARB_RO_EN_OFFSET 0x090 -#define ADF_ARB_WQCFG_OFFSET 0x100 -#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 -#define ADF_ARB_WRK_2_SER_MAP 10 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * index), value) - -#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value) - -#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \ - (ADF_ARB_REG_SIZE * index), value) - -#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \ - (ADF_ARB_REG_SIZE * index), value) - -#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_WRK_2_SER_MAP_OFFSET) + \ - (ADF_ARB_REG_SIZE * index), value) - -#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \ - ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value) - -int adf_init_arb(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr = accel_dev->transport->banks[0].csr_addr; - uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; - uint32_t arb, i; - const uint32_t *thd_2_arb_cfg; - - /* Service arb configured for 32 bytes responses and - * ring flow control check enabled. */ - for (arb = 0; arb < ADF_ARB_NUM; arb++) - WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg); - - /* Setup service weighting */ - for (arb = 0; arb < ADF_ARB_NUM; arb++) - for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) - WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF); - - /* Setup ring response ordering */ - for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++) - WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); - - /* Setup worker queue registers */ - for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) - WRITE_CSR_ARB_WQCFG(csr, i, i); - - /* Map worker threads to service arbiters */ - adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); - - if (!thd_2_arb_cfg) - return -EFAULT; - - for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) - WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); - - return 0; -} - -void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) -{ - WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, - ring->bank->bank_number, - ring->bank->ring_mask & 0xFF); -} - -void adf_exit_arb(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr; - unsigned int i; - - if (!accel_dev->transport) - return; - - csr = accel_dev->transport->banks[0].csr_addr; - - /* Reset arbiter configuration */ - for (i = 0; i < ADF_ARB_NUM; i++) - WRITE_CSR_ARB_SARCONFIG(csr, i, 0); - - /* Shutdown work queue */ - for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) - WRITE_CSR_ARB_WQCFG(csr, i, 0); - - /* Unmap worker threads to service arbiters */ - for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) - WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); - - /* Disable arbitration on all rings */ - for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) - WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); -} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c index 0d03c109c..5570f7879 
100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -59,21 +59,30 @@ #include #include #include "adf_drv.h" +#include "adf_dh895xcc_hw_data.h" static int adf_enable_msix(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t msix_num_entries = hw_data->num_banks + 1; - int i; - - for (i = 0; i < msix_num_entries; i++) - pci_dev_info->msix_entries.entries[i].entry = i; + u32 msix_num_entries = 1; + + /* If SR-IOV is disabled, add entries for each bank */ + if (!accel_dev->pf.vf_info) { + int i; + + msix_num_entries += hw_data->num_banks; + for (i = 0; i < msix_num_entries; i++) + pci_dev_info->msix_entries.entries[i].entry = i; + } else { + pci_dev_info->msix_entries.entries[0].entry = + hw_data->num_banks; + } if (pci_enable_msix_exact(pci_dev_info->pci_dev, pci_dev_info->msix_entries.entries, msix_num_entries)) { - dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n"); + dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); return -EFAULT; } return 0; @@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; - dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", - accel_dev->accel_id); - return IRQ_HANDLED; +#ifdef CONFIG_PCI_IOV + /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ + if (accel_dev->pf.vf_info) { + void __iomem *pmisc_bar_addr = + (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; + u32 vf_mask; + + /* Get the interrupt sources triggered by VFs */ + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & + 0x0000FFFF) << 16) | + ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & + 0x01FFFE00) >> 9); + + if (vf_mask) { + struct adf_accel_vf_info *vf_info; + bool irq_handled = false; + int i; + + /* Disable VF2PF interrupts for VFs with pending ints */ + adf_disable_vf2pf_interrupts(accel_dev, vf_mask); + + /* + * Schedule tasklets to handle VF2PF interrupt BHs + * unless the VF is malicious and is attempting to + * flood the host OS with VF2PF interrupts. 
+ */ + for_each_set_bit(i, (const unsigned long *)&vf_mask, + (sizeof(vf_mask) * BITS_PER_BYTE)) { + vf_info = accel_dev->pf.vf_info + i; + + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { + dev_info(&GET_DEV(accel_dev), + "Too many ints from VF%d\n", + vf_info->vf_nr + 1); + continue; + } + + /* Tasklet will re-enable ints from this VF */ + tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet); + irq_handled = true; + } + + if (irq_handled) + return IRQ_HANDLED; + } + } +#endif /* CONFIG_PCI_IOV */ + + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", + accel_dev->accel_id); + + return IRQ_NONE; } static int adf_request_irqs(struct adf_accel_dev *accel_dev) @@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev) struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct msix_entry *msixe = pci_dev_info->msix_entries.entries; struct adf_etr_data *etr_data = accel_dev->transport; - int ret, i; + int ret, i = 0; char *name; - /* Request msix irq for all banks */ - for (i = 0; i < hw_data->num_banks; i++) { - struct adf_etr_bank_data *bank = &etr_data->banks[i]; - unsigned int cpu, cpus = num_online_cpus(); - - name = *(pci_dev_info->msix_entries.names + i); - snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, - "qat%d-bundle%d", accel_dev->accel_id, i); - ret = request_irq(msixe[i].vector, - adf_msix_isr_bundle, 0, name, bank); - if (ret) { - dev_err(&GET_DEV(accel_dev), - "failed to enable irq %d for %s\n", - msixe[i].vector, name); - return ret; + /* Request msix irq for all banks unless SR-IOV enabled */ + if (!accel_dev->pf.vf_info) { + for (i = 0; i < hw_data->num_banks; i++) { + struct adf_etr_bank_data *bank = &etr_data->banks[i]; + unsigned int cpu, cpus = num_online_cpus(); + + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-bundle%d", accel_dev->accel_id, i); + ret = request_irq(msixe[i].vector, + adf_msix_isr_bundle, 0, name, bank); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to enable irq %d for %s\n", + msixe[i].vector, name); + return ret; + } + + cpu = ((accel_dev->accel_id * hw_data->num_banks) + + i) % cpus; + irq_set_affinity_hint(msixe[i].vector, + get_cpu_mask(cpu)); } - - cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; - irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); } /* Request msix irq for AE */ @@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev) struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct msix_entry *msixe = pci_dev_info->msix_entries.entries; struct adf_etr_data *etr_data = accel_dev->transport; - int i; + int i = 0; - for (i = 0; i < hw_data->num_banks; i++) { - irq_set_affinity_hint(msixe[i].vector, NULL); - free_irq(msixe[i].vector, &etr_data->banks[i]); + if (pci_dev_info->msix_entries.num_entries > 1) { + for (i = 0; i < hw_data->num_banks; i++) { + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, &etr_data->banks[i]); + } } irq_set_affinity_hint(msixe[i].vector, NULL); free_irq(msixe[i].vector, accel_dev); @@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) char **names; struct msix_entry *entries; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t msix_num_entries = hw_data->num_banks + 1; + u32 msix_num_entries = 1; + + /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ + if (!accel_dev->pf.vf_info) + msix_num_entries += hw_data->num_banks; entries = kzalloc_node(msix_num_entries 
* sizeof(*entries), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); @@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) if (!(*(names + i))) goto err; } + accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; accel_dev->accel_pci_dev.msix_entries.entries = entries; accel_dev->accel_pci_dev.msix_entries.names = names; return 0; @@ -198,13 +267,11 @@ err: static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) { - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - uint32_t msix_num_entries = hw_data->num_banks + 1; char **names = accel_dev->accel_pci_dev.msix_entries.names; int i; kfree(accel_dev->accel_pci_dev.msix_entries.entries); - for (i = 0; i < msix_num_entries; i++) + for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++) kfree(*(names + i)); kfree(names); } diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c deleted file mode 100644 index 55b7a8e48..000000000 --- a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
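The MSI-X changes above size the vector table differently depending on whether SR-IOV is active: with VFs enabled the PF requests only the single AE vector (whose table index is num_banks), otherwise one vector per bank plus the AE vector. A condensed, standalone restatement of that sizing logic (kernel types replaced by stand-ins; not the driver source):

#include <stdio.h>

struct msix_entry_sketch { unsigned int entry; };

static unsigned int fill_entries(struct msix_entry_sketch *e,
                                 unsigned int num_banks, int sriov_enabled)
{
        unsigned int n = 1;     /* the AE vector is always requested */

        if (!sriov_enabled) {
                unsigned int i;

                n += num_banks;                 /* one vector per bank, plus AE */
                for (i = 0; i < n; i++)
                        e[i].entry = i;
        } else {
                e[0].entry = num_banks;         /* only the AE entry remains */
        }
        return n;
}

int main(void)
{
        struct msix_entry_sketch e[17];
        unsigned int n;

        n = fill_entries(e, 16, 0);     /* no SR-IOV: banks + AE */
        printf("no SR-IOV: %u vectors\n", n);
        n = fill_entries(e, 16, 1);     /* SR-IOV: AE vector only */
        printf("SR-IOV:    %u vector, table index %u\n", n, e[0].entry);
        return 0;
}

Storing num_entries alongside the table (as the hunk above adds) is what lets adf_free_irqs() and the entry-table teardown work without recomputing the SR-IOV-dependent count.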
-*/ -#include -#include -#include -#include "adf_drv.h" - -static struct service_hndl qat_admin; - -static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) -{ - struct adf_hw_device_data *hw_device = accel_dev->hw_device; - struct icp_qat_fw_init_admin_req req; - struct icp_qat_fw_init_admin_resp resp; - int i; - - memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); - req.init_admin_cmd_id = cmd; - for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { - memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); - if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || - resp.init_resp_hdr.status) - return -EFAULT; - } - return 0; -} - -static int qat_admin_start(struct adf_accel_dev *accel_dev) -{ - return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); -} - -static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, - enum adf_event event) -{ - int ret; - - switch (event) { - case ADF_EVENT_START: - ret = qat_admin_start(accel_dev); - break; - case ADF_EVENT_STOP: - case ADF_EVENT_INIT: - case ADF_EVENT_SHUTDOWN: - default: - ret = 0; - } - return ret; -} - -int qat_admin_register(void) -{ - memset(&qat_admin, 0, sizeof(struct service_hndl)); - qat_admin.event_hld = qat_admin_event_handler; - qat_admin.name = "qat_admin"; - qat_admin.admin = 1; - return adf_service_register(&qat_admin); -} - -int qat_admin_unregister(void) -{ - return adf_service_unregister(&qat_admin); -} diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile new file mode 100644 index 000000000..85399fcbb --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile @@ -0,0 +1,5 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o +qat_dh895xccvf-objs := adf_drv.o \ + adf_isr.o \ + adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c new file mode 100644 index 000000000..a9a27eff4 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -0,0 +1,172 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include "adf_dh895xccvf_hw_data.h" +#include "adf_drv.h" + +static struct adf_hw_device_class dh895xcciov_class = { + .name = ADF_DH895XCCVF_DEVICE_NAME, + .type = DEV_DH895XCCVF, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return ADF_DH895XCCIOV_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return ADF_DH895XCCIOV_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_DH895XCCIOV_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return ADF_DH895XCCIOV_MAX_ACCELENGINES; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCCIOV_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_DH895XCCIOV_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_VF; +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_DH895XCCIOV_PF2VF_OFFSET; +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_DH895XCCIOV_VINTMSK_OFFSET; +} + +static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) +{ +} + +static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Init event to PF\n"); + return -EFAULT; + } + return 0; +} + +static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Shutdown event to PF\n"); +} + +void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &dh895xcciov_class; + hw_data->instance_id = dh895xcciov_class.instances++; + hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS; + hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK; + hw_data->alloc_irq = adf_vf_isr_resource_alloc; + hw_data->free_irq = adf_vf_isr_resource_free; + hw_data->enable_error_correction = adf_vf_void_noop; + hw_data->init_admin_comms = adf_vf_int_noop; + hw_data->exit_admin_comms = adf_vf_void_noop; + hw_data->send_admin_init = adf_vf2pf_init; + hw_data->init_arb = adf_vf_int_noop; + hw_data->exit_arb = adf_vf_void_noop; + hw_data->disable_iov = adf_vf2pf_shutdown; 
+ hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->enable_ints = adf_vf_void_noop; + hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; +} + +void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h new file mode 100644 index 000000000..8f6babfef --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -0,0 +1,68 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
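adf_vf2pf_init() and adf_vf2pf_shutdown() above compose a single doorbell word: an origin flag OR'd with a message-type field shifted into position, handed to adf_iov_putmsg(). A minimal sketch of that composition follows; the bit positions and encodings below are illustrative placeholders, not the driver's real ADF_VF2PF_* values (those are defined in qat_common headers outside this hunk):

#include <stdint.h>
#include <stdio.h>

#define MSGORIGIN_SYSTEM  (1u << 0)  /* placeholder for ADF_VF2PF_MSGORIGIN_SYSTEM */
#define MSGTYPE_SHIFT     2          /* placeholder for ADF_VF2PF_MSGTYPE_SHIFT */
#define MSGTYPE_INIT      3u         /* placeholder for ADF_VF2PF_MSGTYPE_INIT */
#define MSGTYPE_SHUTDOWN  4u         /* placeholder for ADF_VF2PF_MSGTYPE_SHUTDOWN */

/* Build the 32-bit VF->PF doorbell word: origin flag | (type << shift). */
static uint32_t vf2pf_msg(uint32_t type)
{
        return MSGORIGIN_SYSTEM | (type << MSGTYPE_SHIFT);
}

int main(void)
{
        printf("init msg:     0x%x\n", vf2pf_msg(MSGTYPE_INIT));
        printf("shutdown msg: 0x%x\n", vf2pf_msg(MSGTYPE_SHUTDOWN));
        return 0;
}

The same origin flag is what the PF2VF bottom-half handler later in this patch checks before acting on a message, so legacy (non-kernel) senders are ignored on both sides.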
+*/ +#ifndef ADF_DH895XVF_HW_DATA_H_ +#define ADF_DH895XVF_HW_DATA_H_ + +#define ADF_DH895XCCIOV_PMISC_BAR 1 +#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1 +#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1 +#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1 +#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1 +#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8 +#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF +#define ADF_DH895XCCIOV_ETR_BAR 0 +#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1 + +#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200 +#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0) + +#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204 +#define ADF_DH895XCC_VINTSOU_BUN BIT(0) +#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1) + +#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208 +#endif diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c new file mode 100644 index 000000000..789426f21 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -0,0 +1,393 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_dh895xccvf_hw_data.h" +#include "adf_drv.h" + +static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME; + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = adf_driver_name, + .probe = adf_probe, + .remove = adf_remove, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + struct adf_accel_dev *pf; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_DH895XCCIOV_PCI_DEVICE_ID: + adf_clean_hw_data_dh895xcciov(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); + adf_devmgr_rm_dev(accel_dev, pf); +} + +static int adf_dev_configure(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + unsigned long val, bank = 0; + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + goto err; + if (adf_cfg_section_add(accel_dev, "Accelerator0")) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, + (void *)&bank, ADF_DEC)) + goto err; + + val = bank; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, + (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0); + + val = 128; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, + (void *)&val, ADF_DEC)) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 8; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 
ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, + (int)bank); + if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, (void *)&val, ADF_DEC)) + goto err; + + val = 1; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n"); + return -EINVAL; +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_dev *pf; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_DH895XCCIOV_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + accel_dev->is_vf = true; + pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table */ + if (adf_devmgr_add_dev(accel_dev, pf)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + INIT_LIST_HEAD(&accel_dev->crypto_list); + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + accel_dev->hw_device = hw_data; + switch (ent->device) { + case ADF_DH895XCCIOV_PCI_DEVICE_ID: + adf_init_hw_data_dh895xcciov(accel_dev->hw_device); + break; + default: + ret = -ENODEV; + goto out_err; + } + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, adf_driver_name)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, 
bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + /* Completion for VF2PF request/response message exchange */ + init_completion(&accel_dev->vf.iov_msg_completion); + + ret = adf_dev_configure(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + adf_clean_vf_map(true); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h new file mode 100644 index 000000000..e270e4a63 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h @@ -0,0 +1,57 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_DH895xVF_DRV_H_ +#define ADF_DH895xVF_DRV_H_ +#include +#include + +void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); +int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); +void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); +#endif diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c new file mode 100644 index 000000000..87c5d8adb --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c @@ -0,0 +1,258 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
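The VF adf_probe() earlier in this hunk negotiates DMA masks the usual way: try DMA_BIT_MASK(64) first, fall back to DMA_BIT_MASK(32), and keep the consistent (coherent) mask in step with whichever streaming mask succeeded. A standalone sketch of that decision ladder, with try_mask() standing in for pci_set_dma_mask() and a 32-bit-only platform simulated (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK_SKETCH(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stand-in for pci_set_dma_mask(): pretend the platform caps DMA at 32 bits. */
static int try_mask(uint64_t mask)
{
        return mask > DMA_BIT_MASK_SKETCH(32) ? -5 /* failure, like -EIO */ : 0;
}

int main(void)
{
        uint64_t coherent;

        if (try_mask(DMA_BIT_MASK_SKETCH(64))) {
                if (try_mask(DMA_BIT_MASK_SKETCH(32))) {
                        puts("No usable DMA configuration");
                        return 1;       /* probe bails out via out_err_disable */
                }
                coherent = DMA_BIT_MASK_SKETCH(32);
        } else {
                coherent = DMA_BIT_MASK_SKETCH(64);
        }
        printf("coherent mask: %#llx\n", (unsigned long long)coherent);
        return 0;
}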
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_drv.h" +#include "adf_dh895xccvf_hw_data.h" + +static int adf_enable_msi(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + int stat = pci_enable_msi(pci_dev_info->pci_dev); + + if (stat) { + dev_err(&GET_DEV(accel_dev), + "Failed to enable MSI interrupts\n"); + return stat; + } + + accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); + if (!accel_dev->vf.irq_name) + return -ENOMEM; + + return stat; +} + +static void adf_disable_msi(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + kfree(accel_dev->vf.irq_name); + pci_disable_msi(pdev); +} + +static void adf_pf2vf_bh_handler(void *data) +{ + struct adf_accel_dev *accel_dev = data; + void __iomem *pmisc_bar_addr = + (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; + u32 msg; + + /* Read the message from PF */ + msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET); + + if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) + /* Ignore legacy non-system (non-kernel) PF2VF messages */ + goto err; + + switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) { + case ADF_PF2VF_MSGTYPE_RESTARTING: + dev_dbg(&GET_DEV(accel_dev), + "Restarting msg received from PF 0x%x\n", msg); + adf_dev_stop(accel_dev); + break; + case ADF_PF2VF_MSGTYPE_VERSION_RESP: + dev_dbg(&GET_DEV(accel_dev), + "Version resp received from PF 0x%x\n", msg); + accel_dev->vf.pf_version = + (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >> + ADF_PF2VF_VERSION_RESP_VERS_SHIFT; + accel_dev->vf.compatible = + (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >> + ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; + complete(&accel_dev->vf.iov_msg_completion); + break; + default: + goto err; + } + + /* To ack, clear the PF2VFINT bit */ + msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT; + ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg); + + /* Re-enable PF2VF interrupts */ + adf_enable_pf2vf_interrupts(accel_dev); + return; +err: + dev_err(&GET_DEV(accel_dev), + "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n", + msg); +} + +static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) +{ + tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, + (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev); + + mutex_init(&accel_dev->vf.vf2pf_lock); + return 0; +} + +static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) +{ + tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet); + tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet); + mutex_destroy(&accel_dev->vf.vf2pf_lock); +} + +static irqreturn_t adf_isr(int irq, void *privdata) +{ + struct adf_accel_dev *accel_dev = privdata; + void __iomem *pmisc_bar_addr = + (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; + u32 v_int; + + /* Read VF INT source CSR to determine the source of VF interrupt */ + v_int = 
ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET); + + /* Check for PF2VF interrupt */ + if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) { + /* Disable PF to VF interrupt */ + adf_disable_pf2vf_interrupts(accel_dev); + + /* Schedule tasklet to handle interrupt BH */ + tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet); + return IRQ_HANDLED; + } + + /* Check bundle interrupt */ + if (v_int & ADF_DH895XCC_VINTSOU_BUN) { + struct adf_etr_data *etr_data = accel_dev->transport; + struct adf_etr_bank_data *bank = &etr_data->banks[0]; + + /* Disable Flag and Coalesce Ring Interrupts */ + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, + 0); + tasklet_hi_schedule(&bank->resp_handler); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + unsigned int cpu; + int ret; + + snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME, + "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name, + (void *)accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n", + accel_dev->vf.irq_name); + return ret; + } + cpu = accel_dev->accel_id % num_online_cpus(); + irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu)); + + return ret; +} + +static int adf_setup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + + tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler, + (unsigned long)priv_data->banks); + return 0; +} + +static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + + tasklet_disable(&priv_data->banks[0].resp_handler); + tasklet_kill(&priv_data->banks[0].resp_handler); +} + +void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + irq_set_affinity_hint(pdev->irq, NULL); + free_irq(pdev->irq, (void *)accel_dev); + adf_cleanup_bh(accel_dev); + adf_cleanup_pf2vf_bh(accel_dev); + adf_disable_msi(accel_dev); +} + +int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) +{ + if (adf_enable_msi(accel_dev)) + goto err_out; + + if (adf_setup_pf2vf_bh(accel_dev)) + goto err_out; + + if (adf_setup_bh(accel_dev)) + goto err_out; + + if (adf_request_msi_irq(accel_dev)) + goto err_out; + + return 0; +err_out: + adf_vf_isr_resource_free(accel_dev); + return -EFAULT; +} diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 5c5df1d17..be2f50492 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req) if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); - scatterwalk_sg_chain(rctx->sg, 2, req->src); + sg_chain(rctx->sg, 2, req->src); req->src = rctx->sg; } diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 397a500b3..820dc3acb 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req) sg_init_table(rctx->in_sg_chain, 2); sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt); - scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src); + sg_chain(rctx->in_sg_chain, 2, req->src); rctx->total = req->nbytes + rctx->buf_cnt; rctx->in_sg = rctx->in_sg_chain; @@ -1516,7 +1516,7 @@ static int sahara_probe(struct 
platform_device *pdev) } /* Allocate HW descriptors */ - dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev, + dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev, SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), &dev->hw_phys_desc[0], GFP_KERNEL); if (!dev->hw_desc[0]) { @@ -1528,34 +1528,31 @@ static int sahara_probe(struct platform_device *pdev) sizeof(struct sahara_hw_desc); /* Allocate space for iv and key */ - dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, + dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, &dev->key_phys_base, GFP_KERNEL); if (!dev->key_base) { dev_err(&pdev->dev, "Could not allocate memory for key\n"); - err = -ENOMEM; - goto err_key; + return -ENOMEM; } dev->iv_base = dev->key_base + AES_KEYSIZE_128; dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; /* Allocate space for context: largest digest + message length field */ - dev->context_base = dma_alloc_coherent(&pdev->dev, + dev->context_base = dmam_alloc_coherent(&pdev->dev, SHA256_DIGEST_SIZE + 4, &dev->context_phys_base, GFP_KERNEL); if (!dev->context_base) { dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n"); - err = -ENOMEM; - goto err_key; + return -ENOMEM; } /* Allocate space for HW links */ - dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, + dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), &dev->hw_phys_link[0], GFP_KERNEL); if (!dev->hw_link[0]) { dev_err(&pdev->dev, "Could not allocate hw links\n"); - err = -ENOMEM; - goto err_link; + return -ENOMEM; } for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + @@ -1572,15 +1569,14 @@ static int sahara_probe(struct platform_device *pdev) dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto"); if (IS_ERR(dev->kthread)) { - err = PTR_ERR(dev->kthread); - goto err_link; + return PTR_ERR(dev->kthread); } init_completion(&dev->dma_completion); err = clk_prepare_enable(dev->clk_ipg); if (err) - goto err_link; + return err; err = clk_prepare_enable(dev->clk_ahb); if (err) goto clk_ipg_disable; @@ -1620,25 +1616,11 @@ static int sahara_probe(struct platform_device *pdev) return 0; err_algs: - dma_free_coherent(&pdev->dev, - SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), - dev->hw_link[0], dev->hw_phys_link[0]); kthread_stop(dev->kthread); dev_ptr = NULL; clk_disable_unprepare(dev->clk_ahb); clk_ipg_disable: clk_disable_unprepare(dev->clk_ipg); -err_link: - dma_free_coherent(&pdev->dev, - 2 * AES_KEYSIZE_128, - dev->key_base, dev->key_phys_base); - dma_free_coherent(&pdev->dev, - SHA256_DIGEST_SIZE, - dev->context_base, dev->context_phys_base); -err_key: - dma_free_coherent(&pdev->dev, - SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), - dev->hw_desc[0], dev->hw_phys_desc[0]); return err; } @@ -1647,16 +1629,6 @@ static int sahara_remove(struct platform_device *pdev) { struct sahara_dev *dev = platform_get_drvdata(pdev); - dma_free_coherent(&pdev->dev, - SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), - dev->hw_link[0], dev->hw_phys_link[0]); - dma_free_coherent(&pdev->dev, - 2 * AES_KEYSIZE_128, - dev->key_base, dev->key_phys_base); - dma_free_coherent(&pdev->dev, - SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), - dev->hw_desc[0], dev->hw_phys_desc[0]); - kthread_stop(dev->kthread); sahara_unregister_algs(dev); diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile new file mode 100644 index 000000000..8f4c7a273 --- /dev/null +++ 
b/drivers/crypto/sunxi-ss/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o +sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c new file mode 100644 index 000000000..a19ee127e --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -0,0 +1,542 @@ +/* + * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE + * + * This file add support for AES cipher with 128,192,256 bits + * keysize in CBC and ECB mode. + * Add support also for DES and 3DES in CBC and ECB mode. + * + * You could find the datasheet in Documentation/arm/sunxi/README + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include "sun4i-ss.h" + +static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); + u32 mode = ctx->mode; + /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ + u32 rx_cnt = SS_RX_DEFAULT; + u32 tx_cnt = 0; + u32 spaces; + u32 v; + int i, err = 0; + unsigned int ileft = areq->nbytes; + unsigned int oleft = areq->nbytes; + unsigned int todo; + struct sg_mapping_iter mi, mo; + unsigned int oi, oo; /* offset for in and out */ + + if (areq->nbytes == 0) + return 0; + + if (!areq->info) { + dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); + return -EINVAL; + } + + if (!areq->src || !areq->dst) { + dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); + return -EINVAL; + } + + spin_lock_bh(&ss->slock); + + for (i = 0; i < op->keylen; i += 4) + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + + if (areq->info) { + for (i = 0; i < 4 && i < ivsize / 4; i++) { + v = *(u32 *)(areq->info + i * 4); + writel(v, ss->base + SS_IV0 + i * 4); + } + } + writel(mode, ss->base + SS_CTL); + + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + sg_miter_next(&mi); + sg_miter_next(&mo); + if (!mi.addr || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + + ileft = areq->nbytes / 4; + oleft = areq->nbytes / 4; + oi = 0; + oo = 0; + do { + todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); + if (todo > 0) { + ileft -= todo; + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); + oi += todo * 4; + } + if (oi == mi.length) { + sg_miter_next(&mi); + oi = 0; + } + + spaces = readl(ss->base + SS_FCSR); + rx_cnt = SS_RXFIFO_SPACES(spaces); + tx_cnt = SS_TXFIFO_SPACES(spaces); + + todo = min3(tx_cnt, oleft, (mo.length - oo) / 4); + if (todo > 0) { + oleft -= todo; + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); + oo += todo * 4; + } + if (oo == mo.length) { + sg_miter_next(&mo); + oo = 0; + } + } while (oleft > 0); + + if (areq->info) { + for (i = 0; i < 4 && i < ivsize / 4; i++) { + v = readl(ss->base + SS_IV0 + i * 4); + *(u32 *)(areq->info + i * 4) = v; + } + } + +release_ss: + sg_miter_stop(&mi); + 
sg_miter_stop(&mo); + writel(0, ss->base + SS_CTL); + spin_unlock_bh(&ss->slock); + return err; +} + +/* Generic function that support SG with size not multiple of 4 */ +static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + int no_chunk = 1; + struct scatterlist *in_sg = areq->src; + struct scatterlist *out_sg = areq->dst; + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); + u32 mode = ctx->mode; + /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ + u32 rx_cnt = SS_RX_DEFAULT; + u32 tx_cnt = 0; + u32 v; + u32 spaces; + int i, err = 0; + unsigned int ileft = areq->nbytes; + unsigned int oleft = areq->nbytes; + unsigned int todo; + struct sg_mapping_iter mi, mo; + unsigned int oi, oo; /* offset for in and out */ + char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ + char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ + unsigned int ob = 0; /* offset in buf */ + unsigned int obo = 0; /* offset in bufo*/ + unsigned int obl = 0; /* length of data in bufo */ + + if (areq->nbytes == 0) + return 0; + + if (!areq->info) { + dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); + return -EINVAL; + } + + if (!areq->src || !areq->dst) { + dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); + return -EINVAL; + } + + /* + * if we have only SGs with size multiple of 4, + * we can use the SS optimized function + */ + while (in_sg && no_chunk == 1) { + if ((in_sg->length % 4) != 0) + no_chunk = 0; + in_sg = sg_next(in_sg); + } + while (out_sg && no_chunk == 1) { + if ((out_sg->length % 4) != 0) + no_chunk = 0; + out_sg = sg_next(out_sg); + } + + if (no_chunk == 1) + return sun4i_ss_opti_poll(areq); + + spin_lock_bh(&ss->slock); + + for (i = 0; i < op->keylen; i += 4) + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + + if (areq->info) { + for (i = 0; i < 4 && i < ivsize / 4; i++) { + v = *(u32 *)(areq->info + i * 4); + writel(v, ss->base + SS_IV0 + i * 4); + } + } + writel(mode, ss->base + SS_CTL); + + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + sg_miter_next(&mi); + sg_miter_next(&mo); + if (!mi.addr || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + ileft = areq->nbytes; + oleft = areq->nbytes; + oi = 0; + oo = 0; + + while (oleft > 0) { + if (ileft > 0) { + /* + * todo is the number of consecutive 4byte word that we + * can read from current SG + */ + todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); + if (todo > 0 && ob == 0) { + writesl(ss->base + SS_RXFIFO, mi.addr + oi, + todo); + ileft -= todo * 4; + oi += todo * 4; + } else { + /* + * not enough consecutive bytes, so we need to + * linearize in buf. 
todo is in bytes + * After that copy, if we have a multiple of 4 + * we need to be able to write all buf in one + * pass, so it is why we min() with rx_cnt + */ + todo = min3(rx_cnt * 4 - ob, ileft, + mi.length - oi); + memcpy(buf + ob, mi.addr + oi, todo); + ileft -= todo; + oi += todo; + ob += todo; + if (ob % 4 == 0) { + writesl(ss->base + SS_RXFIFO, buf, + ob / 4); + ob = 0; + } + } + if (oi == mi.length) { + sg_miter_next(&mi); + oi = 0; + } + } + + spaces = readl(ss->base + SS_FCSR); + rx_cnt = SS_RXFIFO_SPACES(spaces); + tx_cnt = SS_TXFIFO_SPACES(spaces); + dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n", + mode, + oi, mi.length, ileft, areq->nbytes, rx_cnt, + oo, mo.length, oleft, areq->nbytes, tx_cnt, + todo, ob); + + if (tx_cnt == 0) + continue; + /* todo in 4bytes word */ + todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); + if (todo > 0) { + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); + oleft -= todo * 4; + oo += todo * 4; + if (oo == mo.length) { + sg_miter_next(&mo); + oo = 0; + } + } else { + /* + * read obl bytes in bufo, we read at maximum for + * emptying the device + */ + readsl(ss->base + SS_TXFIFO, bufo, tx_cnt); + obl = tx_cnt * 4; + obo = 0; + do { + /* + * how many bytes we can copy ? + * no more than remaining SG size + * no more than remaining buffer + * no need to test against oleft + */ + todo = min(mo.length - oo, obl - obo); + memcpy(mo.addr + oo, bufo + obo, todo); + oleft -= todo; + obo += todo; + oo += todo; + if (oo == mo.length) { + sg_miter_next(&mo); + oo = 0; + } + } while (obo < obl); + /* bufo must be fully used here */ + } + } + if (areq->info) { + for (i = 0; i < 4 && i < ivsize / 4; i++) { + v = readl(ss->base + SS_IV0 + i * 4); + *(u32 *)(areq->info + i * 4) = v; + } + } + +release_ss: + sg_miter_stop(&mi); + sg_miter_stop(&mo); + writel(0, ss->base + SS_CTL); + spin_unlock_bh(&ss->slock); + + return err; +} + +/* CBC AES */ +int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB AES */ +int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* CBC DES */ +int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq) +{ + 
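+	/*
+	 * Each mode wrapper here only differs by the SS_CTL word it builds:
+	 * algorithm (SS_OP_*), chaining mode (SS_CBC/SS_ECB), the enable
+	 * bit, the direction and the keymode selected at setkey() time.
+	 * The transfer itself is done by sun4i_ss_cipher_poll().
+	 */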
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB DES */ +int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* CBC 3DES */ +int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB 3DES */ +int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cipher_init(struct crypto_tfm *tfm) +{ + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct sun4i_ss_alg_template *algt; + + memset(op, 0, sizeof(struct sun4i_tfm_ctx)); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); + op->ss = algt->ss; + + tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx); + + return 0; +} + +/* check and set the AES key, prepare the mode to be used */ +int sun4i_ss_aes_setkey(struct 
crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + + switch (keylen) { + case 128 / 8: + op->keymode = SS_AES_128BITS; + break; + case 192 / 8: + op->keymode = SS_AES_192BITS; + break; + case 256 / 8: + op->keymode = SS_AES_256BITS; + break; + default: + dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen); + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + op->keylen = keylen; + memcpy(op->key, key, keylen); + return 0; +} + +/* check and set the DES key, prepare the mode to be used */ +int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + u32 flags; + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + if (unlikely(keylen != DES_KEY_SIZE)) { + dev_err(ss->dev, "Invalid keylen %u\n", keylen); + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + flags = crypto_ablkcipher_get_flags(tfm); + + ret = des_ekey(tmp, key); + if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + dev_dbg(ss->dev, "Weak key %u\n", keylen); + return -EINVAL; + } + + op->keylen = keylen; + memcpy(op->key, key, keylen); + return 0; +} + +/* check and set the 3DES key, prepare the mode to be used */ +int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + + if (unlikely(keylen != 3 * DES_KEY_SIZE)) { + dev_err(ss->dev, "Invalid keylen %u\n", keylen); + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + op->keylen = keylen; + memcpy(op->key, key, keylen); + return 0; +} diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c new file mode 100644 index 000000000..eab6fe227 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -0,0 +1,425 @@ +/* + * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE + * + * Core file which registers crypto algorithms supported by the SS. + * + * You could find a link for the datasheet in Documentation/arm/sunxi/README + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
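+ *
+ * The ss_algs[] table below mixes ahash (MD5, SHA1) and ablkcipher
+ * (AES, DES, 3DES) templates; sun4i_ss_probe() walks the table and
+ * registers each entry with the crypto API, unwinding on error.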
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
+static struct sun4i_ss_alg_template ss_algs[] = {
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+  .mode = SS_OP_MD5,
+  .alg.hash = {
+	.init = sun4i_hash_init,
+	.update = sun4i_hash_update,
+	.final = sun4i_hash_final,
+	.finup = sun4i_hash_finup,
+	.digest = sun4i_hash_digest,
+	.export = sun4i_hash_export_md5,
+	.import = sun4i_hash_import_md5,
+	.halg = {
+		.digestsize = MD5_DIGEST_SIZE,
+		.base = {
+			.cra_name = "md5",
+			.cra_driver_name = "md5-sun4i-ss",
+			.cra_priority = 300,
+			.cra_alignmask = 3,
+			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_type = &crypto_ahash_type,
+			.cra_init = sun4i_hash_crainit
+		}
+	}
+  }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+  .mode = SS_OP_SHA1,
+  .alg.hash = {
+	.init = sun4i_hash_init,
+	.update = sun4i_hash_update,
+	.final = sun4i_hash_final,
+	.finup = sun4i_hash_finup,
+	.digest = sun4i_hash_digest,
+	.export = sun4i_hash_export_sha1,
+	.import = sun4i_hash_import_sha1,
+	.halg = {
+		.digestsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "sha1",
+			.cra_driver_name = "sha1-sun4i-ss",
+			.cra_priority = 300,
+			.cra_alignmask = 3,
+			.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_type = &crypto_ahash_type,
+			.cra_init = sun4i_hash_crainit
+		}
+	}
+  }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+  .alg.crypto = {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "cbc-aes-sun4i-ss",
+	.cra_priority = 300,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_alignmask = 3,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_init = sun4i_ss_cipher_init,
+	.cra_ablkcipher = {
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = sun4i_ss_aes_setkey,
+		.encrypt = sun4i_ss_cbc_aes_encrypt,
+		.decrypt = sun4i_ss_cbc_aes_decrypt,
+	}
+  }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+  .alg.crypto = {
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "ecb-aes-sun4i-ss",
+	.cra_priority = 300,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_alignmask = 3,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_init = sun4i_ss_cipher_init,
+	.cra_ablkcipher = {
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = sun4i_ss_aes_setkey,
+		.encrypt = sun4i_ss_ecb_aes_encrypt,
+		.decrypt = sun4i_ss_ecb_aes_decrypt,
+	}
+  }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+  .alg.crypto = {
+	.cra_name = "cbc(des)",
+	.cra_driver_name = "cbc-des-sun4i-ss",
+	.cra_priority = 300,
+	.cra_blocksize = DES_BLOCK_SIZE,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+	.cra_module = THIS_MODULE,
+	.cra_alignmask = 3,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_init = sun4i_ss_cipher_init,
+	.cra_u.ablkcipher = {
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.setkey = sun4i_ss_des_setkey,
+		.encrypt = sun4i_ss_cbc_des_encrypt,
+		.decrypt = sun4i_ss_cbc_des_decrypt,
+	}
+  }
+},
+{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+  .alg.crypto = {
+	.cra_name = 
"ecb(des)", + .cra_driver_name = "ecb-des-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = sun4i_ss_cipher_init, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = sun4i_ss_des_setkey, + .encrypt = sun4i_ss_ecb_des_encrypt, + .decrypt = sun4i_ss_ecb_des_decrypt, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = sun4i_ss_cipher_init, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_cbc_des3_encrypt, + .decrypt = sun4i_ss_cbc_des3_decrypt, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = sun4i_ss_cipher_init, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_ecb_des3_encrypt, + .decrypt = sun4i_ss_ecb_des3_decrypt, + } + } +}, +}; + +static int sun4i_ss_probe(struct platform_device *pdev) +{ + struct resource *res; + u32 v; + int err, i; + unsigned long cr; + const unsigned long cr_ahb = 24 * 1000 * 1000; + const unsigned long cr_mod = 150 * 1000 * 1000; + struct sun4i_ss_ctx *ss; + + if (!pdev->dev.of_node) + return -ENODEV; + + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ss->base)) { + dev_err(&pdev->dev, "Cannot request MMIO\n"); + return PTR_ERR(ss->base); + } + + ss->ssclk = devm_clk_get(&pdev->dev, "mod"); + if (IS_ERR(ss->ssclk)) { + err = PTR_ERR(ss->ssclk); + dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err); + return err; + } + dev_dbg(&pdev->dev, "clock ss acquired\n"); + + ss->busclk = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(ss->busclk)) { + err = PTR_ERR(ss->busclk); + dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err); + return err; + } + dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); + + ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + if (IS_ERR(ss->reset)) { + if (PTR_ERR(ss->reset) == -EPROBE_DEFER) + return PTR_ERR(ss->reset); + dev_info(&pdev->dev, "no reset control found\n"); + ss->reset = NULL; + } + + /* Enable both clocks */ + err = clk_prepare_enable(ss->busclk); + if (err != 0) { + dev_err(&pdev->dev, "Cannot prepare_enable busclk\n"); + return err; + } + err = clk_prepare_enable(ss->ssclk); + if (err != 0) { + dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n"); + goto error_ssclk; + } + + /* + * Check that clock have the correct rates given in the 
datasheet
+ * Try to set the clock to the maximum allowed
+ */
+	err = clk_set_rate(ss->ssclk, cr_mod);
+	if (err != 0) {
+		dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+		goto error_clk;
+	}
+
+	/* Deassert reset if we have a reset control */
+	if (ss->reset) {
+		err = reset_control_deassert(ss->reset);
+		if (err) {
+			dev_err(&pdev->dev, "Cannot deassert reset control\n");
+			goto error_clk;
+		}
+	}
+
+	/*
+	 * The only impact of clocks below the requirement is bad performance,
+	 * so do not print "errors";
+	 * only warn about overclocked clocks
+	 */
+	cr = clk_get_rate(ss->busclk);
+	if (cr >= cr_ahb)
+		dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			cr, cr / 1000000, cr_ahb);
+	else
+		dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			 cr, cr / 1000000, cr_ahb);
+
+	cr = clk_get_rate(ss->ssclk);
+	if (cr <= cr_mod)
+		if (cr < cr_mod)
+			dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				 cr, cr / 1000000, cr_mod);
+		else
+			dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				cr, cr / 1000000, cr_mod);
+	else
+		dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+			 cr, cr / 1000000, cr_mod);
+
+	/*
+	 * The datasheet names it "Die Bonding ID".
+	 * I expect it to be a sort of Security System revision number.
+	 * Since the A80 seems to have another version of the SS,
+	 * this info could be useful
+	 */
+	writel(SS_ENABLED, ss->base + SS_CTL);
+	v = readl(ss->base + SS_CTL);
+	v >>= 16;
+	v &= 0x07;
+	dev_info(&pdev->dev, "Die ID %d\n", v);
+	writel(0, ss->base + SS_CTL);
+
+	ss->dev = &pdev->dev;
+
+	spin_lock_init(&ss->slock);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		ss_algs[i].ss = ss;
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			err = crypto_register_alg(&ss_algs[i].alg.crypto);
+			if (err != 0) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.crypto.cra_name);
+				goto error_alg;
+			}
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			err = crypto_register_ahash(&ss_algs[i].alg.hash);
+			if (err != 0) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.hash.halg.base.cra_name);
+				goto error_alg;
+			}
+			break;
+		}
+	}
+	platform_set_drvdata(pdev, ss);
+	return 0;
+error_alg:
+	i--;
+	for (; i >= 0; i--) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		}
+	}
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+error_clk:
+	clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+	clk_disable_unprepare(ss->busclk);
+	return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+	int i;
+	struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		}
+	}
+
+	writel(0, ss->base + SS_CTL);
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+	clk_disable_unprepare(ss->busclk);
+	clk_disable_unprepare(ss->ssclk);
+	return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+	{ .compatible = "allwinner,sun4i-a10-crypto" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+	.probe = sun4i_ss_probe,
+	.remove = sun4i_ss_remove,
+	.driver = {
+		.name = "sun4i-ss",
+		.of_match_table = a20ss_crypto_of_match_table,
+	},
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE ");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644
index 000000000..ff8031498
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -0,0 +1,492 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sun4i_req_ctx));
+	return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun4i_ss_alg_template *algt;
+	struct sun4i_ss_ctx *ss;
+
+	memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+	ss = algt->ss;
+	op->ss = algt->ss;
+	op->mode = algt->mode;
+
+	return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct md5_state *octx = out;
+	int i;
+
+	octx->byte_count = op->byte_count + op->len;
+
+	memcpy(octx->block, op->buf, op->len);
+
+	if (op->byte_count > 0) {
+		for (i = 0; i < 4; i++)
+			octx->hash[i] = op->hash[i];
+	} else {
+		octx->hash[0] = SHA1_H0;
+		octx->hash[1] = SHA1_H1;
+		octx->hash[2] = SHA1_H2;
+		octx->hash[3] = SHA1_H3;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct md5_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->byte_count & ~0x3F;
+	op->len = ictx->byte_count & 0x3F;
+
+	memcpy(op->buf, ictx->block, op->len);
+
+	for (i = 0; i < 4; i++)
+		op->hash[i] = ictx->hash[i];
+
+	return 0;
+}
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sha1_state *octx = out;
+	int i;
+
+	octx->count = op->byte_count + op->len;
+
+	memcpy(octx->buffer, op->buf, op->len);
+
+	if (op->byte_count > 0) {
+		for (i = 0; i < 5; i++)
+			octx->state[i] = op->hash[i];
+	} else {
+		octx->state[0] = SHA1_H0;
+		octx->state[1] = SHA1_H1;
+		octx->state[2] = SHA1_H2;
+		octx->state[3] = SHA1_H3;
+		octx->state[4] = SHA1_H4;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct sha1_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->count & ~0x3F;
+	op->len = ictx->count & 0x3F;
+
+	memcpy(op->buf, ictx->buffer, op->len);
+
+	for (i = 0; i < 5; i++)
+		op->hash[i] = ictx->state[i];
+
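+	/* with len, buf and the five state words restored, hashing
+	 * resumes exactly where the exported state stopped
+	 */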
+	return 0;
+}
+
+/*
+ * sun4i_hash_update: update hash engine
+ *
+ * Could be used for both SHA1 and MD5
+ * Write data in steps of 32 bits and push them to the SS.
+ *
+ * Since we cannot leave partial data and hash state in the engine,
+ * we need to get the hash state at the end of this function.
+ * We can get the hash state every 64 bytes
+ *
+ * So the first task is to get the number of bytes to write to the SS
+ * modulo 64
+ * The extra bytes will go to a temporary buffer op->buf storing op->len bytes
+ *
+ * So at the beginning of update()
+ * if op->len + areq->nbytes < 64
+ * => all data will be written to the wait buffer (op->buf) and end=0
+ * if not, write all data from op->buf to the device and set end so the
+ * total written is a multiple of 64 bytes
+ *
+ * example 1:
+ * update1 60 bytes => op->len=60
+ * update2 60 bytes => need one more word to have 64 bytes
+ * end=4
+ * so write all data from op->buf and one word of the SGs
+ * store the remaining data in op->buf
+ * final state op->len=56
+ */
+int sun4i_hash_update(struct ahash_request *areq)
+{
+	u32 v, ivmode = 0;
+	unsigned int i = 0;
+	/*
+	 * i is the total bytes read from SGs, to be compared to areq->nbytes
+	 * i is important because we cannot rely on SG length since the sum of
+	 * SG->length could be greater than areq->nbytes
+	 */
+
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sun4i_ss_ctx *ss = op->ss;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	unsigned int in_i = 0; /* advancement in the current SG */
+	unsigned int end;
+	/*
+	 * end is the position where we need to stop writing to the device,
+	 * to be compared to i
+	 */
+	int in_r, err = 0;
+	unsigned int todo;
+	u32 spaces, rx_cnt = SS_RX_DEFAULT;
+	size_t copied = 0;
+	struct sg_mapping_iter mi;
+
+	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+		__func__, crypto_tfm_alg_name(areq->base.tfm),
+		op->byte_count, areq->nbytes, op->mode,
+		op->len, op->hash[0]);
+
+	if (areq->nbytes == 0)
+		return 0;
+
+	/* protect against overflow */
+	if (areq->nbytes > UINT_MAX - op->len) {
+		dev_err(ss->dev, "Cannot process too large request\n");
+		return -EINVAL;
+	}
+
+	if (op->len + areq->nbytes < 64) {
+		/* linearize data to op->buf */
+		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					    op->buf + op->len, areq->nbytes, 0);
+		op->len += copied;
+		return 0;
+	}
+
+	end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+	if (end > areq->nbytes || areq->nbytes - end > 63) {
+		dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+			end, areq->nbytes);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&ss->slock);
+
+	/*
+	 * if some data has been processed before,
+	 * we need to restore the partial hash state
+	 */
+	if (op->byte_count > 0) {
+		ivmode = SS_IV_ARBITRARY;
+		for (i = 0; i < 5; i++)
+			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+	}
+	/* Enable the device */
+	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+	i = 0;
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	in_i = 0;
+
+	do {
+		/*
+		 * we need to linearize in two cases:
+		 * - the buffer is already used
+		 * - the SG does not have enough bytes remaining (< 4)
+		 */
+		if (op->len > 0 || (mi.length - in_i) < 4) {
+			/*
+			 * if we have entered here we have two reasons to stop:
+			 * - the buffer is full
+			 * - we reached the end
+			 */
+			while (op->len < 64 && i < end) {
+				/* how many bytes we can read from current SG */
+				in_r = min3(mi.length - in_i, end - i,
+					    64 - op->len);
+				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+				op->len += in_r;
+				i += in_r;
+				in_i += in_r;
+				if (in_i == mi.length) {
+					sg_miter_next(&mi);
+					in_i = 0;
+				}
+			}
+			if (op->len > 3 && (op->len % 4) == 0) {
+				/* write buf to the device */
+				writesl(ss->base + SS_RXFIFO, op->buf,
+					op->len / 4);
+				op->byte_count += op->len;
+				op->len = 0;
+			}
+		}
+		if (mi.length - in_i > 3 && i < end) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    ((mi.length - in_i) / 4) * 4);
+			/* how many bytes we can write to the device */
+			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+			op->byte_count += todo * 4;
+			i += todo * 4;
+			in_i += todo * 4;
+			rx_cnt -= todo;
+			if (rx_cnt == 0) {
+				spaces = readl(ss->base + SS_FCSR);
+				rx_cnt = SS_RXFIFO_SPACES(spaces);
+			}
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	} while (i < end);
+	/* final linear */
+	if ((areq->nbytes - i) < 64) {
+		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    64 - op->len);
+			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+			op->len += in_r;
+			i += in_r;
+			in_i += in_r;
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	}
+
+	sg_miter_stop(&mi);
+
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+	i = 0;
+	do {
+		v = readl(ss->base + SS_CTL);
+		i++;
+	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+	if (i >= SS_TIMEOUT) {
+		dev_err_ratelimited(ss->dev,
+				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+				    i, SS_TIMEOUT, v, areq->nbytes);
+		err = -EIO;
+		goto release_ss;
+	}
+
+	/* get the partial hash only if something was written */
+	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+release_ss:
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return err;
+}
+
+/*
+ * sun4i_hash_final: finalize hashing operation
+ *
+ * If we have some remaining bytes, we write them.
+ * Then ask the SS to finalize the hashing operation
+ *
+ * I do not check the RX FIFO size in this function since the size is 32
+ * after each enabling and this function never writes more than 32 words.
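+ *
+ * A rough worst case, derived from the code below: op->len = 63 gives
+ * 15 words from the wait buffer, then 1 padding word, 14 zero words
+ * and 2 length words, i.e. exactly 32 words in total.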
+ */
+int sun4i_hash_final(struct ahash_request *areq)
+{
+	u32 v, ivmode = 0;
+	unsigned int i;
+	unsigned int j = 0;
+	int zeros, err = 0;
+	unsigned int index, padlen;
+	__be64 bits;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sun4i_ss_ctx *ss = op->ss;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	u32 bf[32];
+	u32 wb = 0;
+	unsigned int nwait, nbw = 0;
+
+	dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
+		__func__, op->byte_count, areq->nbytes, op->mode,
+		op->len, op->hash[0]);
+
+	spin_lock_bh(&ss->slock);
+
+	/*
+	 * if we have already written something,
+	 * restore the partial hash state
+	 */
+	if (op->byte_count > 0) {
+		ivmode = SS_IV_ARBITRARY;
+		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+	}
+	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+	/* write the remaining words of the wait buffer */
+	if (op->len > 0) {
+		nwait = op->len / 4;
+		if (nwait > 0) {
+			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+			op->byte_count += 4 * nwait;
+		}
+		nbw = op->len - 4 * nwait;
+		wb = *(u32 *)(op->buf + nwait * 4);
+		wb &= (0xFFFFFFFF >> (4 - nbw) * 8);
+	}
+
+	/* pad the partial word with the final '1' bit appended */
+	if (nbw > 0) {
+		wb |= ((1 << 7) << (nbw * 8));
+		bf[j++] = wb;
+	} else {
+		bf[j++] = 1 << 7;
+	}
+
+	/*
+	 * number of spaces to pad in order to obtain 64 bytes, minus 8 (the
+	 * size) and minus 4 (the final '1')
+	 * I took the operations from other MD5/SHA1 implementations
+	 */
+
+	/* we have already sent 4 more bytes, of which nbw are data */
+	if (op->mode == SS_OP_MD5) {
+		index = (op->byte_count + 4) & 0x3f;
+		op->byte_count += nbw;
+		if (index > 56)
+			zeros = (120 - index) / 4;
+		else
+			zeros = (56 - index) / 4;
+	} else {
+		op->byte_count += nbw;
+		index = op->byte_count & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+		zeros = (padlen - 1) / 4;
+	}
+
+	memset(bf + j, 0, 4 * zeros);
+	j += zeros;
+
+	/* write the length of data */
+	if (op->mode == SS_OP_SHA1) {
+		bits = cpu_to_be64(op->byte_count << 3);
+		bf[j++] = bits & 0xffffffff;
+		bf[j++] = (bits >> 32) & 0xffffffff;
+	} else {
+		bf[j++] = (op->byte_count << 3) & 0xffffffff;
+		bf[j++] = (op->byte_count >> 29) & 0xffffffff;
+	}
+	writesl(ss->base + SS_RXFIFO, bf, j);
+
+	/* Tell the SS to stop the hashing */
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+	/*
+	 * Wait for the SS to finish the hash.
+	 * The timeout can only happen in case of bad overclocking
+	 * or a driver bug.
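+ * Note that SS_TIMEOUT bounds the number of SS_CTL reads, not a
+ * wall-clock delay, so the real wait time depends on the bus speed.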
+ */ + i = 0; + do { + v = readl(ss->base + SS_CTL); + i++; + } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); + if (i >= SS_TIMEOUT) { + dev_err_ratelimited(ss->dev, + "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", + i, SS_TIMEOUT, v, areq->nbytes); + err = -EIO; + goto release_ss; + } + + /* Get the hash from the device */ + if (op->mode == SS_OP_SHA1) { + for (i = 0; i < 5; i++) { + v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4)); + memcpy(areq->result + i * 4, &v, 4); + } + } else { + for (i = 0; i < 4; i++) { + v = readl(ss->base + SS_MD0 + i * 4); + memcpy(areq->result + i * 4, &v, 4); + } + } + +release_ss: + writel(0, ss->base + SS_CTL); + spin_unlock_bh(&ss->slock); + return err; +} + +/* sun4i_hash_finup: finalize hashing operation after an update */ +int sun4i_hash_finup(struct ahash_request *areq) +{ + int err; + + err = sun4i_hash_update(areq); + if (err != 0) + return err; + + return sun4i_hash_final(areq); +} + +/* combo of init/update/final functions */ +int sun4i_hash_digest(struct ahash_request *areq) +{ + int err; + + err = sun4i_hash_init(areq); + if (err != 0) + return err; + + err = sun4i_hash_update(areq); + if (err != 0) + return err; + + return sun4i_hash_final(areq); +} diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h new file mode 100644 index 000000000..8e9c05f6e --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -0,0 +1,201 @@ +/* + * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE + * + * Support AES cipher with 128,192,256 bits keysize. + * Support MD5 and SHA1 hash algorithms. + * Support DES and 3DES + * + * You could find the datasheet in Documentation/arm/sunxi/README + * + * Licensed under the GPL-2. 
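+ *
+ * The register map below also documents the engine sizing: SS_KEY0..7
+ * form a 32-byte key area (enough for AES-256), SS_IV0..3 a 16-byte IV
+ * area and SS_MD0..4 a 20-byte digest area (enough for SHA1).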
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+
+#define SS_CTL 0x00
+#define SS_KEY0 0x04
+#define SS_KEY1 0x08
+#define SS_KEY2 0x0C
+#define SS_KEY3 0x10
+#define SS_KEY4 0x14
+#define SS_KEY5 0x18
+#define SS_KEY6 0x1C
+#define SS_KEY7 0x20
+
+#define SS_IV0 0x24
+#define SS_IV1 0x28
+#define SS_IV2 0x2C
+#define SS_IV3 0x30
+
+#define SS_FCSR 0x44
+
+#define SS_MD0 0x4C
+#define SS_MD1 0x50
+#define SS_MD2 0x54
+#define SS_MD3 0x58
+#define SS_MD4 0x5C
+
+#define SS_RXFIFO 0x200
+#define SS_TXFIFO 0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT (0 << 15)
+#define SS_PRNG_CONTINUE (1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY (1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB (0 << 12)
+#define SS_CBC (1 << 12)
+#define SS_CTS (3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS (0 << 10)
+#define SS_CNT_32BITS (1 << 10)
+#define SS_CNT_64BITS (2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS (0 << 8)
+#define SS_AES_192BITS (1 << 8)
+#define SS_AES_256BITS (2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION (0 << 7)
+#define SS_DECRYPTION (1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES (0 << 4)
+#define SS_OP_DES (1 << 4)
+#define SS_OP_3DES (2 << 4)
+#define SS_OP_SHA1 (3 << 4)
+#define SS_OP_MD5 (4 << 4)
+#define SS_OP_PRNG (5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END (1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START (1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED (0 << 0)
+#define SS_ENABLED (1 << 0)
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE (1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE (1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
+
+#define SS_RX_MAX 32
+#define SS_RX_DEFAULT SS_RX_MAX
+#define SS_TX_MAX 33
+
+#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
+
+struct sun4i_ss_ctx {
+	void __iomem *base;
+	int irq;
+	struct clk *busclk;
+	struct clk *ssclk;
+	struct reset_control *reset;
+	struct device *dev;
+	struct resource *res;
+	spinlock_t slock; /* control the use of the device */
+};
+
+struct sun4i_ss_alg_template {
+	u32 type;
+	u32 mode;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_tfm_ctx {
+	u32 key[AES_MAX_KEY_SIZE / 4]; /* divided by sizeof(u32) */
+	u32 keylen;
+	u32 keymode;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+	u32 mode;
+};
+
+struct sun4i_req_ctx {
+	u32 mode;
+	u64 byte_count; /* number of bytes "uploaded" to the device */
+	u32 hash[5]; /* for storing SS_IVx register */
+	char buf[64];
+	unsigned int len;
+	struct sun4i_ss_ctx *ss;
+};
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int 
sun4i_hash_export_md5(struct ahash_request *areq, void *out); +int sun4i_hash_import_md5(struct ahash_request *areq, const void *in); +int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); +int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); + +int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq); + +int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq); + +int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq); +int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq); + +int sun4i_ss_cipher_init(struct crypto_tfm *tfm); +int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); +int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); +int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 83aca95a9..3b20a1bce 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -766,6 +766,7 @@ static int talitos_rng_init(struct hwrng *rng) static int talitos_register_rng(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); + int err; priv->rng.name = dev_driver_string(dev), priv->rng.init = talitos_rng_init, @@ -773,14 +774,22 @@ static int talitos_register_rng(struct device *dev) priv->rng.data_read = talitos_rng_data_read, priv->rng.priv = (unsigned long)dev; - return hwrng_register(&priv->rng); + err = hwrng_register(&priv->rng); + if (!err) + priv->rng_registered = true; + + return err; } static void talitos_unregister_rng(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); + if (!priv->rng_registered) + return; + hwrng_unregister(&priv->rng); + priv->rng_registered = false; } /* @@ -799,7 +808,6 @@ struct talitos_ctx { unsigned int keylen; unsigned int enckeylen; unsigned int authkeylen; - unsigned int authsize; }; #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE @@ -819,16 +827,6 @@ struct talitos_ahash_req_ctx { struct scatterlist *psrc; }; -static int aead_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) -{ - struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - - ctx->authsize = authsize; - - return 0; -} - static int aead_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { @@ -857,12 +855,11 @@ badkey: /* * talitos_edesc - s/w-extended descriptor - * @assoc_nents: number of segments in associated data scatterlist * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist - * @assoc_chained: whether assoc is chained or not * @src_chained: whether src is chained or not * @dst_chained: whether dst is chained or not + * @icv_ool: whether ICV is out-of-line * @iv_dma: dma address of iv for checking continuity and link table * @dma_len: length of dma mapped link_tbl space * @dma_link_tbl: bus physical address of link_tbl/buf @@ -875,12 +872,11 @@ badkey: * of link_tbl data */ struct talitos_edesc { - int 
assoc_nents; int src_nents; int dst_nents; - bool assoc_chained; bool src_chained; bool dst_chained; + bool icv_ool; dma_addr_t iv_dma; int dma_len; dma_addr_t dma_link_tbl; @@ -952,14 +948,6 @@ static void ipsec_esp_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); - if (edesc->assoc_chained) - talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); - else if (areq->assoclen) - /* assoc_nents counts also for IV in non-contiguous cases */ - dma_unmap_sg(dev, areq->assoc, - edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, - DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst); if (edesc->dma_len) @@ -976,7 +964,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, { struct aead_request *areq = context; struct crypto_aead *authenc = crypto_aead_reqtfm(areq); - struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_edesc *edesc; struct scatterlist *sg; void *icvdata; @@ -986,13 +974,12 @@ static void ipsec_esp_encrypt_done(struct device *dev, ipsec_esp_unmap(dev, edesc, areq); /* copy the generated ICV to dst */ - if (edesc->dst_nents) { + if (edesc->icv_ool) { icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2 + - edesc->assoc_nents]; + edesc->dst_nents + 2]; sg = sg_last(areq->dst, edesc->dst_nents); - memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, - icvdata, ctx->authsize); + memcpy((char *)sg_virt(sg) + sg->length - authsize, + icvdata, authsize); } kfree(edesc); @@ -1006,10 +993,10 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, { struct aead_request *req = context; struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct talitos_ctx *ctx = crypto_aead_ctx(authenc); + unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_edesc *edesc; struct scatterlist *sg; - void *icvdata; + char *oicv, *icv; edesc = container_of(desc, struct talitos_edesc, desc); @@ -1017,16 +1004,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, if (!err) { /* auth check */ - if (edesc->dma_len) - icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2 + - edesc->assoc_nents]; - else - icvdata = &edesc->link_tbl[0]; - sg = sg_last(req->dst, edesc->dst_nents ? : 1); - err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length - - ctx->authsize, ctx->authsize) ? -EBADMSG : 0; + icv = (char *)sg_virt(sg) + sg->length - authsize; + + if (edesc->dma_len) { + oicv = (char *)&edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2]; + if (edesc->icv_ool) + icv = oicv + authsize; + } else + oicv = (char *)&edesc->link_tbl[0]; + + err = memcmp(oicv, icv, authsize) ? 
-EBADMSG : 0; } kfree(edesc); @@ -1059,53 +1048,69 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, * convert scatterlist to SEC h/w link table format * stop at cryptlen bytes */ -static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, - int cryptlen, struct talitos_ptr *link_tbl_ptr) +static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, + unsigned int offset, int cryptlen, + struct talitos_ptr *link_tbl_ptr) { int n_sg = sg_count; + int count = 0; - while (sg && n_sg--) { - to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); - link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); - link_tbl_ptr->j_extent = 0; - link_tbl_ptr++; - cryptlen -= sg_dma_len(sg); - sg = sg_next(sg); - } + while (cryptlen && sg && n_sg--) { + unsigned int len = sg_dma_len(sg); + + if (offset >= len) { + offset -= len; + goto next; + } + + len -= offset; + + if (len > cryptlen) + len = cryptlen; - /* adjust (decrease) last one (or two) entry's len to cryptlen */ - link_tbl_ptr--; - while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) { - /* Empty this entry, and move to previous one */ - cryptlen += be16_to_cpu(link_tbl_ptr->len); - link_tbl_ptr->len = 0; - sg_count--; - link_tbl_ptr--; + to_talitos_ptr(link_tbl_ptr + count, + sg_dma_address(sg) + offset, 0); + link_tbl_ptr[count].len = cpu_to_be16(len); + link_tbl_ptr[count].j_extent = 0; + count++; + cryptlen -= len; + offset = 0; + +next: + sg = sg_next(sg); } - link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) - + cryptlen); /* tag end of link table */ - link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; + if (count > 0) + link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN; - return sg_count; + return count; +} + +static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count, + int cryptlen, + struct talitos_ptr *link_tbl_ptr) +{ + return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, + link_tbl_ptr); } /* * fill in and submit ipsec_esp descriptor */ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, - u64 seq, void (*callback) (struct device *dev, - struct talitos_desc *desc, - void *context, int error)) + void (*callback)(struct device *dev, + struct talitos_desc *desc, + void *context, int error)) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); + unsigned int authsize = crypto_aead_authsize(aead); struct talitos_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; unsigned int cryptlen = areq->cryptlen; - unsigned int authsize = ctx->authsize; unsigned int ivsize = crypto_aead_ivsize(aead); + int tbl_off = 0; int sg_count, ret; int sg_link_tbl_len; @@ -1113,36 +1118,27 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, DMA_TO_DEVICE); + sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL + : DMA_TO_DEVICE, + edesc->src_chained); + /* hmac data */ - desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); - if (edesc->assoc_nents) { - int tbl_off = edesc->src_nents + edesc->dst_nents + 2; - struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; + desc->ptr[1].len = cpu_to_be16(areq->assoclen); + if (sg_count > 1 && + (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, + areq->assoclen, + &edesc->link_tbl[tbl_off])) > 1) { + tbl_off += ret; to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * sizeof(struct talitos_ptr), 0); desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; - /* assoc_nents - 1 entries for assoc, 1 for IV */ - sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1, - areq->assoclen, tbl_ptr); - - /* add IV to link table */ - tbl_ptr += sg_count - 1; - tbl_ptr->j_extent = 0; - tbl_ptr++; - to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); - tbl_ptr->len = cpu_to_be16(ivsize); - tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { - if (areq->assoclen) - to_talitos_ptr(&desc->ptr[1], - sg_dma_address(areq->assoc), 0); - else - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); + to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); desc->ptr[1].j_extent = 0; } @@ -1150,8 +1146,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); desc->ptr[2].len = cpu_to_be16(ivsize); desc->ptr[2].j_extent = 0; - /* Sync needed for the aead_givencrypt case */ - dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, @@ -1167,33 +1161,24 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, desc->ptr[4].len = cpu_to_be16(cryptlen); desc->ptr[4].j_extent = authsize; - sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL - : DMA_TO_DEVICE, - edesc->src_chained); - - if (sg_count == 1) { + sg_link_tbl_len = cryptlen; + if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) + sg_link_tbl_len += authsize; + + if (sg_count > 1 && + (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, + sg_link_tbl_len, + &edesc->link_tbl[tbl_off])) > 1) { + tbl_off += ret; + desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + + tbl_off * + sizeof(struct talitos_ptr), 0); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); - } else { - sg_link_tbl_len = cryptlen; - - if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) - sg_link_tbl_len = cryptlen + authsize; - - sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, - &edesc->link_tbl[0]); - if (sg_count > 1) { - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[4], - sg_dma_address(areq->src), 0); - } - } /* cipher out */ desc->ptr[5].len = cpu_to_be16(cryptlen); @@ -1204,16 +1189,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, edesc->dst_nents ? 
: 1, DMA_FROM_DEVICE, edesc->dst_chained); - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); - } else { - int tbl_off = edesc->src_nents + 1; + edesc->icv_ool = false; + + if (sg_count > 1 && + (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, + areq->assoclen, cryptlen, + &edesc->link_tbl[tbl_off])) > + 1) { struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + tbl_off * sizeof(struct talitos_ptr), 0); - sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, - tbl_ptr); /* Add an entry to the link table for ICV data */ tbl_ptr += sg_count - 1; @@ -1224,13 +1210,16 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* icv data follows link tables */ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + - (tbl_off + edesc->dst_nents + 1 + - edesc->assoc_nents) * - sizeof(struct talitos_ptr), 0); + (edesc->src_nents + edesc->dst_nents + + 2) * sizeof(struct talitos_ptr) + + authsize, 0); desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); - } + + edesc->icv_ool = true; + } else + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); /* iv out */ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, @@ -1268,7 +1257,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) * allocate and map the extended descriptor */ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, - struct scatterlist *assoc, struct scatterlist *src, struct scatterlist *dst, u8 *iv, @@ -1281,8 +1269,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, bool encrypt) { struct talitos_edesc *edesc; - int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; - bool assoc_chained = false, src_chained = false, dst_chained = false; + int src_nents, dst_nents, alloc_len, dma_len; + bool src_chained = false, dst_chained = false; dma_addr_t iv_dma = 0; gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; @@ -1298,48 +1286,35 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, if (ivsize) iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (assoclen) { - /* - * Currently it is assumed that iv is provided whenever assoc - * is. - */ - BUG_ON(!iv); - - assoc_nents = sg_count(assoc, assoclen, &assoc_chained); - talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE, - assoc_chained); - assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents; - - if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma) - assoc_nents = assoc_nents ? assoc_nents + 1 : 2; - } - if (!dst || dst == src) { - src_nents = sg_count(src, cryptlen + authsize, &src_chained); + src_nents = sg_count(src, assoclen + cryptlen + authsize, + &src_chained); src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; } else { /* dst && dst != src*/ - src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize), + src_nents = sg_count(src, assoclen + cryptlen + + (encrypt ? 0 : authsize), &src_chained); src_nents = (src_nents == 1) ? 0 : src_nents; - dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0), + dst_nents = sg_count(dst, assoclen + cryptlen + + (encrypt ? authsize : 0), &dst_chained); dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } /* * allocate space for base edesc plus the link tables, - * allowing for two separate entries for ICV and generated ICV (+ 2), - * and the ICV data itself + * allowing for two separate entries for AD and generated ICV (+ 2), + * and space for two sets of ICVs (stashed and generated) */ alloc_len = sizeof(struct talitos_edesc); - if (assoc_nents || src_nents || dst_nents) { + if (src_nents || dst_nents) { if (is_sec1) dma_len = (src_nents ? cryptlen : 0) + (dst_nents ? cryptlen : 0); else - dma_len = (src_nents + dst_nents + 2 + assoc_nents) * - sizeof(struct talitos_ptr) + authsize; + dma_len = (src_nents + dst_nents + 2) * + sizeof(struct talitos_ptr) + authsize * 2; alloc_len += dma_len; } else { dma_len = 0; @@ -1348,13 +1323,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, edesc = kmalloc(alloc_len, GFP_DMA | flags); if (!edesc) { - if (assoc_chained) - talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); - else if (assoclen) - dma_unmap_sg(dev, assoc, - assoc_nents ? assoc_nents - 1 : 1, - DMA_TO_DEVICE); - if (iv_dma) dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); @@ -1362,10 +1330,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, return ERR_PTR(-ENOMEM); } - edesc->assoc_nents = assoc_nents; edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; - edesc->assoc_chained = assoc_chained; edesc->src_chained = src_chained; edesc->dst_chained = dst_chained; edesc->iv_dma = iv_dma; @@ -1382,12 +1348,13 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, int icv_stashing, bool encrypt) { struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); unsigned int ivsize = crypto_aead_ivsize(authenc); - return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, iv, areq->assoclen, areq->cryptlen, - ctx->authsize, ivsize, icv_stashing, + authsize, ivsize, icv_stashing, areq->base.flags, encrypt); } @@ -1405,14 +1372,14 @@ static int aead_encrypt(struct aead_request *req) /* set encrypt */ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; - return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done); + return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); } static int aead_decrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); + unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - unsigned int authsize = ctx->authsize; struct talitos_private *priv = dev_get_drvdata(ctx->dev); struct talitos_edesc *edesc; struct scatterlist *sg; @@ -1437,7 +1404,7 @@ static int aead_decrypt(struct aead_request *req) /* reset integrity check result bits */ edesc->desc.hdr_lo = 0; - return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done); + return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); } /* Have to check the ICV with software */ @@ -1445,40 +1412,16 @@ static int aead_decrypt(struct aead_request *req) /* stash incoming ICV for later cmp with ICV generated by the h/w */ if (edesc->dma_len) - icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2 + - edesc->assoc_nents]; + icvdata = (char *)&edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2]; else icvdata = &edesc->link_tbl[0]; sg = sg_last(req->src, edesc->src_nents ? 
: 1); - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, - ctx->authsize); + memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); - return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); -} - -static int aead_givencrypt(struct aead_givcrypt_request *req) -{ - struct aead_request *areq = &req->areq; - struct crypto_aead *authenc = crypto_aead_reqtfm(areq); - struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - struct talitos_edesc *edesc; - - /* allocate extended descriptor */ - edesc = aead_edesc_alloc(areq, req->giv, 0, true); - if (IS_ERR(edesc)) - return PTR_ERR(edesc); - - /* set encrypt */ - edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; - - memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); - /* avoid consecutive packets going out with same IV */ - *(__be64 *)req->giv ^= cpu_to_be64(req->seq); - - return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); + return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); } static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, @@ -1710,7 +1653,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); - return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->info, 0, areq->nbytes, 0, ivsize, 0, areq->base.flags, encrypt); } @@ -1895,7 +1838,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); - return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, + return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, nbytes, 0, 0, 0, areq->base.flags, false); } @@ -1986,7 +1929,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) sg_init_table(req_ctx->bufsl, nsg); sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); if (nsg > 1) - scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src); + sg_chain(req_ctx->bufsl, 2, areq->src); req_ctx->psrc = req_ctx->bufsl; } else req_ctx->psrc = areq->src; @@ -2161,6 +2104,7 @@ struct talitos_alg_template { union { struct crypto_alg crypto; struct ahash_alg hash; + struct aead_alg aead; } alg; __be32 desc_hdr_template; }; @@ -2168,15 +2112,16 @@ struct talitos_alg_template { static struct talitos_alg_template driver_algs[] = { /* AEAD algorithms. 
These use a single-pass ipsec_esp descriptor */ { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha1),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2187,15 +2132,17 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA1_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2207,15 +2154,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha224),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA224_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2226,15 +2174,17 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA224_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA224_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2246,15 +2196,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA224_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha256),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize 
= AES_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2265,15 +2216,17 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA256_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2285,15 +2238,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha384),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA384_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2304,15 +2258,17 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEUB_SHA384_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA384_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2324,15 +2280,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEUB_SHA384_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha512),cbc(aes))", - .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = SHA512_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = 
AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2343,15 +2300,17 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEUB_SHA512_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = SHA512_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2363,15 +2322,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEUB_SHA512_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(md5),cbc(aes))", - .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | @@ -2382,15 +2342,16 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_MD5_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, - .alg.crypto = { - .cra_name = "authenc(hmac(md5),cbc(des3_ede))", - .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, - .cra_aead = { - .ivsize = DES3_EDE_BLOCK_SIZE, - .maxauthsize = MD5_DIGEST_SIZE, - } + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, }, .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_DEU | @@ -2658,15 +2619,9 @@ static int talitos_cra_init(struct crypto_tfm *tfm) return 0; } -static int talitos_cra_init_aead(struct crypto_tfm *tfm) +static int talitos_cra_init_aead(struct crypto_aead *tfm) { - struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); - - talitos_cra_init(tfm); - - /* random first IV */ - get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); - + talitos_cra_init(crypto_aead_tfm(tfm)); return 0; } @@ -2713,9 +2668,9 @@ static int talitos_remove(struct platform_device *ofdev) list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { switch (t_alg->algt.type) { case CRYPTO_ALG_TYPE_ABLKCIPHER: - case CRYPTO_ALG_TYPE_AEAD: - crypto_unregister_alg(&t_alg->algt.alg.crypto); break; + case CRYPTO_ALG_TYPE_AEAD: + crypto_unregister_aead(&t_alg->algt.alg.aead); case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&t_alg->algt.alg.hash); break; @@ -2727,7 +2682,7 @@ static int talitos_remove(struct platform_device *ofdev) if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 
talitos_unregister_rng(dev); - for (i = 0; i < priv->num_channels; i++) + for (i = 0; priv->chan && i < priv->num_channels; i++) kfree(priv->chan[i].fifo); kfree(priv->chan); @@ -2774,15 +2729,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, alg->cra_ablkcipher.geniv = "eseqiv"; break; case CRYPTO_ALG_TYPE_AEAD: - alg = &t_alg->algt.alg.crypto; - alg->cra_init = talitos_cra_init_aead; - alg->cra_type = &crypto_aead_type; - alg->cra_aead.setkey = aead_setkey; - alg->cra_aead.setauthsize = aead_setauthsize; - alg->cra_aead.encrypt = aead_encrypt; - alg->cra_aead.decrypt = aead_decrypt; - alg->cra_aead.givencrypt = aead_givencrypt; - alg->cra_aead.geniv = ""; + alg = &t_alg->algt.alg.aead.base; + t_alg->algt.alg.aead.init = talitos_cra_init_aead; + t_alg->algt.alg.aead.setkey = aead_setkey; + t_alg->algt.alg.aead.encrypt = aead_encrypt; + t_alg->algt.alg.aead.decrypt = aead_decrypt; break; case CRYPTO_ALG_TYPE_AHASH: alg = &t_alg->algt.alg.hash.halg.base; @@ -3041,7 +2992,7 @@ static int talitos_probe(struct platform_device *ofdev) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { struct talitos_crypto_alg *t_alg; - char *name = NULL; + struct crypto_alg *alg = NULL; t_alg = talitos_alg_alloc(dev, &driver_algs[i]); if (IS_ERR(t_alg)) { @@ -3053,21 +3004,26 @@ static int talitos_probe(struct platform_device *ofdev) switch (t_alg->algt.type) { case CRYPTO_ALG_TYPE_ABLKCIPHER: - case CRYPTO_ALG_TYPE_AEAD: err = crypto_register_alg( &t_alg->algt.alg.crypto); - name = t_alg->algt.alg.crypto.cra_driver_name; + alg = &t_alg->algt.alg.crypto; break; + + case CRYPTO_ALG_TYPE_AEAD: + err = crypto_register_aead( + &t_alg->algt.alg.aead); + alg = &t_alg->algt.alg.aead.base; + break; + case CRYPTO_ALG_TYPE_AHASH: err = crypto_register_ahash( &t_alg->algt.alg.hash); - name = - t_alg->algt.alg.hash.halg.base.cra_driver_name; + alg = &t_alg->algt.alg.hash.halg.base; break; } if (err) { dev_err(dev, "%s alg registration failed\n", - name); + alg->cra_driver_name); kfree(t_alg); } else list_add_tail(&t_alg->entry, &priv->alg_list); diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 314daf55e..0090f3211 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -52,12 +52,7 @@ struct talitos_ptr { __be32 ptr; /* address */ }; -static const struct talitos_ptr zero_entry = { - .len = 0, - .j_extent = 0, - .eptr = 0, - .ptr = 0 -}; +static const struct talitos_ptr zero_entry; /* descriptor */ struct talitos_desc { @@ -154,6 +149,7 @@ struct talitos_private { /* hwrng device */ struct hwrng rng; + bool rng_registered; }; extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl index a59188494..b9997335f 100644 --- a/drivers/crypto/vmx/ppc-xlate.pl +++ b/drivers/crypto/vmx/ppc-xlate.pl @@ -169,6 +169,7 @@ my $vpmsumd = sub { vcrypto_op(@_, 1224); }; my $vpmsubh = sub { vcrypto_op(@_, 1096); }; my $vpmsumw = sub { vcrypto_op(@_, 1160); }; my $vaddudm = sub { vcrypto_op(@_, 192); }; +my $vadduqm = sub { vcrypto_op(@_, 256); }; my $mtsle = sub { my ($f, $arg) = @_; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ca1b362d7..ca848cc6a 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -53,7 +53,7 @@ static struct devfreq *find_device_devfreq(struct device *dev) { struct devfreq *tmp_devfreq; - if (unlikely(IS_ERR_OR_NULL(dev))) { + if (IS_ERR_OR_NULL(dev)) 
{ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } @@ -133,7 +133,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name) { struct devfreq_governor *tmp_governor; - if (unlikely(IS_ERR_OR_NULL(name))) { + if (IS_ERR_OR_NULL(name)) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } @@ -177,10 +177,10 @@ int update_devfreq(struct devfreq *devfreq) return err; /* - * Adjust the freuqency with user freq and QoS. + * Adjust the frequency with user freq and QoS. * - * List from the highest proiority - * max_freq (probably called by thermal when it's too hot) + * List from the highest priority + * max_freq * min_freq */ @@ -482,7 +482,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->profile->max_state * devfreq->profile->max_state, GFP_KERNEL); - devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * + devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * devfreq->profile->max_state, GFP_KERNEL); devfreq->last_stat_updated = jiffies; @@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); - goto err_dev; + goto err_out; } mutex_unlock(&devfreq->lock); @@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); -err_dev: kfree(devfreq); err_out: return ERR_PTR(err); @@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, ret = PTR_ERR(governor); goto out; } - if (df->governor == governor) + if (df->governor == governor) { + ret = 0; goto out; + } if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 7d99d13ba..f312485f1 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -1,7 +1,7 @@ /* * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support * - * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi * * This program is free software; you can redistribute it and/or modify @@ -82,6 +82,15 @@ struct __exynos_ppmu_events { PPMU_EVENT(mscl), PPMU_EVENT(fimd0x), PPMU_EVENT(fimd1x), + + /* Only for Exynos5433 SoCs */ + PPMU_EVENT(d0-cpu), + PPMU_EVENT(d0-general), + PPMU_EVENT(d0-rt), + PPMU_EVENT(d1-cpu), + PPMU_EVENT(d1-general), + PPMU_EVENT(d1-rt), + { /* sentinel */ }, }; @@ -96,6 +105,9 @@ static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) return -EINVAL; } +/* + * The devfreq-event ops structure for PPMU v1.1 + */ static int exynos_ppmu_disable(struct devfreq_event_dev *edev) { struct exynos_ppmu *info = devfreq_event_get_drvdata(edev); @@ -200,10 +212,159 @@ static const struct devfreq_event_ops exynos_ppmu_ops = { .get_event = exynos_ppmu_get_event, }; +/* + * The devfreq-event ops structure for PPMU v2.0 + */ +static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev) +{ + struct exynos_ppmu *info = devfreq_event_get_drvdata(edev); + u32 pmnc, clear; + + /* Disable all counters */ + clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK + | PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK); + + __raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG); + __raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC); + __raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC); + __raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET); + + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A); + __raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET); + + /* Disable PPMU */ + pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC); + pmnc &= ~PPMU_PMNC_ENABLE_MASK; + __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC); + + return 0; +} + +static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev) +{ + struct exynos_ppmu *info = devfreq_event_get_drvdata(edev); + int id = exynos_ppmu_find_ppmu_id(edev); + u32 pmnc, cntens; + + /* Enable all counters */ + cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS); + cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id)); + __raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS); + + /* Set the event of Read/Write data count */ + switch (id) { + case PPMU_PMNCNT0: + case PPMU_PMNCNT1: + case PPMU_PMNCNT2: + __raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT, + info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id)); + break; + case PPMU_PMNCNT3: + __raw_writel(PPMU_V2_EVT3_RW_DATA_CNT, + info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id)); + break; + } + + /* Reset cycle counter/performance counter and enable PPMU */ + pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC); + pmnc &= ~(PPMU_PMNC_ENABLE_MASK + | PPMU_PMNC_COUNTER_RESET_MASK + | PPMU_PMNC_CC_RESET_MASK + | PPMU_PMNC_CC_DIVIDER_MASK + | PPMU_V2_PMNC_START_MODE_MASK); + pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT); + pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT); + pmnc |= 
(PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT); + pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT); + __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC); + + return 0; +} + +static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct exynos_ppmu *info = devfreq_event_get_drvdata(edev); + int id = exynos_ppmu_find_ppmu_id(edev); + u32 pmnc, cntenc; + u32 pmcnt_high, pmcnt_low; + u64 load_count = 0; + + /* Disable PPMU */ + pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC); + pmnc &= ~PPMU_PMNC_ENABLE_MASK; + __raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC); + + /* Read cycle count and performance count */ + edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT); + + switch (id) { + case PPMU_PMNCNT0: + case PPMU_PMNCNT1: + case PPMU_PMNCNT2: + load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id)); + break; + case PPMU_PMNCNT3: + pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH); + pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW); + load_count = ((u64)((pmcnt_high & 0xff)) << 32) + + (u64)pmcnt_low; + break; + } + edata->load_count = load_count; + + /* Disable all counters */ + cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC); + cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id)); + __raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC); + + dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name, + edata->load_count, edata->total_count); + return 0; +} + +static const struct devfreq_event_ops exynos_ppmu_v2_ops = { + .disable = exynos_ppmu_v2_disable, + .set_event = exynos_ppmu_v2_set_event, + .get_event = exynos_ppmu_v2_get_event, +}; + +static const struct of_device_id exynos_ppmu_id_match[] = { + { + .compatible = "samsung,exynos-ppmu", + .data = (void *)&exynos_ppmu_ops, + }, { + .compatible = "samsung,exynos-ppmu-v2", + .data = (void *)&exynos_ppmu_v2_ops, + }, + { /* sentinel */ }, +}; + +static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np) +{ + const struct of_device_id *match; + + match = of_match_node(exynos_ppmu_id_match, np); + return (struct devfreq_event_ops *)match->data; +} + static int of_get_devfreq_events(struct device_node *np, struct exynos_ppmu *info) { struct devfreq_event_desc *desc; + struct devfreq_event_ops *event_ops; struct device *dev = info->dev; struct device_node *events_np, *node; int i, j, count; @@ -214,6 +375,7 @@ static int of_get_devfreq_events(struct device_node *np, "failed to get child node of devfreq-event devices\n"); return -EINVAL; } + event_ops = exynos_bus_get_ops(np); count = of_get_child_count(events_np); desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL); @@ -238,7 +400,7 @@ static int of_get_devfreq_events(struct device_node *np, continue; } - desc[j].ops = &exynos_ppmu_ops; + desc[j].ops = event_ops; desc[j].driver_data = info; of_property_read_string(node, "event-name", &desc[j].name); @@ -354,11 +516,6 @@ static int exynos_ppmu_remove(struct platform_device *pdev) return 0; } -static struct of_device_id exynos_ppmu_id_match[] = { - { .compatible = "samsung,exynos-ppmu", }, - { /* sentinel */ }, -}; - static struct platform_driver exynos_ppmu_driver = { .probe = exynos_ppmu_probe, .remove = exynos_ppmu_remove, diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h index 4e831d48c..05774c449 100644 --- a/drivers/devfreq/event/exynos-ppmu.h +++ b/drivers/devfreq/event/exynos-ppmu.h @@ -26,6 +26,9 @@ enum ppmu_counter { PPMU_PMNCNT_MAX, }; +/*** + * 
PPMUv1.1 Definitions + */ enum ppmu_event_type { PPMU_RO_BUSY_CYCLE_CNT = 0x0, PPMU_WO_BUSY_CYCLE_CNT = 0x1, @@ -90,4 +93,71 @@ enum ppmu_reg { #define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x)) #define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x)) +/*** + * PPMU_V2.0 definitions + */ +enum ppmu_v2_mode { + PPMU_V2_MODE_MANUAL = 0, + PPMU_V2_MODE_AUTO = 1, + PPMU_V2_MODE_CIG = 2, /* CIG (Conditional Interrupt Generation) */ +}; + +enum ppmu_v2_event_type { + PPMU_V2_RO_DATA_CNT = 0x4, + PPMU_V2_WO_DATA_CNT = 0x5, + + PPMU_V2_EVT3_RW_DATA_CNT = 0x22, /* Only for Event3 */ +}; + +enum ppmu_V2_reg { + /* PPC control register */ + PPMU_V2_PMNC = 0x04, + PPMU_V2_CNTENS = 0x08, + PPMU_V2_CNTENC = 0x0c, + PPMU_V2_INTENS = 0x10, + PPMU_V2_INTENC = 0x14, + PPMU_V2_FLAG = 0x18, + + /* Cycle Counter and Performance Event Counter Register */ + PPMU_V2_CCNT = 0x48, + PPMU_V2_PMCNT0 = 0x34, + PPMU_V2_PMCNT1 = 0x38, + PPMU_V2_PMCNT2 = 0x3c, + PPMU_V2_PMCNT3_LOW = 0x40, + PPMU_V2_PMCNT3_HIGH = 0x44, + + /* Bus Event Generator */ + PPMU_V2_CIG_CFG0 = 0x1c, + PPMU_V2_CIG_CFG1 = 0x20, + PPMU_V2_CIG_CFG2 = 0x24, + PPMU_V2_CIG_RESULT = 0x28, + PPMU_V2_CNT_RESET = 0x2c, + PPMU_V2_CNT_AUTO = 0x30, + PPMU_V2_CH_EV0_TYPE = 0x200, + PPMU_V2_CH_EV1_TYPE = 0x204, + PPMU_V2_CH_EV2_TYPE = 0x208, + PPMU_V2_CH_EV3_TYPE = 0x20c, + PPMU_V2_SM_ID_V = 0x220, + PPMU_V2_SM_ID_A = 0x224, + PPMU_V2_SM_OTHERS_V = 0x228, + PPMU_V2_SM_OTHERS_A = 0x22c, + PPMU_V2_INTERRUPT_RESET = 0x260, +}; + +/* PMNC register */ +#define PPMU_V2_PMNC_START_MODE_SHIFT 20 +#define PPMU_V2_PMNC_START_MODE_MASK (0x3 << PPMU_V2_PMNC_START_MODE_SHIFT) + +#define PPMU_PMNC_CC_RESET_SHIFT 2 +#define PPMU_PMNC_COUNTER_RESET_SHIFT 1 +#define PPMU_PMNC_ENABLE_SHIFT 0 +#define PPMU_PMNC_START_MODE_MASK BIT(16) +#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3) +#define PPMU_PMNC_CC_RESET_MASK BIT(2) +#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1) +#define PPMU_PMNC_ENABLE_MASK BIT(0) + +#define PPMU_V2_PMNCT(x) (PPMU_V2_PMCNT0 + (0x4 * x)) +#define PPMU_V2_CH_EVx_TYPE(x) (PPMU_V2_CH_EV0_TYPE + (0x4 * x)) + #endif /* __EXYNOS_PPMU_H__ */ diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 0720ba84c..ae72ba5e7 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -21,17 +21,20 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, unsigned long *freq) { - struct devfreq_dev_status stat; - int err = df->profile->get_dev_status(df->dev.parent, &stat); + int err; + struct devfreq_dev_status *stat; unsigned long long a, b; unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; struct devfreq_simple_ondemand_data *data = df->data; unsigned long max = (df->max_freq) ? 
df->max_freq : UINT_MAX; + err = devfreq_update_stats(df); if (err) return err; + stat = &df->last_status; + if (data) { if (data->upthreshold) dfso_upthreshold = data->upthreshold; @@ -43,41 +46,41 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, return -EINVAL; /* Assume MAX if it is going to be divided by zero */ - if (stat.total_time == 0) { + if (stat->total_time == 0) { *freq = max; return 0; } /* Prevent overflow */ - if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) { - stat.busy_time >>= 7; - stat.total_time >>= 7; + if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { + stat->busy_time >>= 7; + stat->total_time >>= 7; } /* Set MAX if it's busy enough */ - if (stat.busy_time * 100 > - stat.total_time * dfso_upthreshold) { + if (stat->busy_time * 100 > + stat->total_time * dfso_upthreshold) { *freq = max; return 0; } /* Set MAX if we do not know the initial frequency */ - if (stat.current_frequency == 0) { + if (stat->current_frequency == 0) { *freq = max; return 0; } /* Keep the current frequency */ - if (stat.busy_time * 100 > - stat.total_time * (dfso_upthreshold - dfso_downdifferential)) { - *freq = stat.current_frequency; + if (stat->busy_time * 100 > + stat->total_time * (dfso_upthreshold - dfso_downdifferential)) { + *freq = stat->current_frequency; return 0; } /* Set the desired frequency based on the load */ - a = stat.busy_time; - a *= stat.current_frequency; - b = div_u64(a, stat.total_time); + a = stat->busy_time; + a *= stat->current_frequency; + b = div_u64(a, stat->total_time); b *= 100; b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); *freq = (unsigned long) b; diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 13a1a6e81..848b93ee9 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -541,18 +541,20 @@ static struct devfreq_dev_profile tegra_devfreq_profile = { static int tegra_governor_get_target(struct devfreq *devfreq, unsigned long *freq) { - struct devfreq_dev_status stat; + struct devfreq_dev_status *stat; struct tegra_devfreq *tegra; struct tegra_devfreq_device *dev; unsigned long target_freq = 0; unsigned int i; int err; - err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat); + err = devfreq_update_stats(devfreq); if (err) return err; - tegra = stat.private_data; + stat = &devfreq->last_status; + + tegra = stat->private_data; for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) { dev = &tegra->devices[i]; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 88d474b78..b4584757d 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -33,27 +33,29 @@ if DMADEVICES comment "DMA Devices" -config INTEL_MIC_X100_DMA - tristate "Intel MIC X100 DMA Driver" - depends on 64BIT && X86 && INTEL_MIC_BUS - select DMA_ENGINE - help - This enables DMA support for the Intel Many Integrated Core - (MIC) family of PCIe form factor coprocessor X100 devices that - run a 64 bit Linux OS. This driver will be used by both MIC - host and card drivers. - - If you are building host kernel with a MIC device or a card - kernel for a MIC device, then say M (recommended) or Y, else - say N. If unsure say N. +#core +config ASYNC_TX_ENABLE_CHANNEL_SWITCH + bool - More information about the Intel MIC family as well as the Linux - OS and tools for MIC to use with this driver are available from - . 
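The simple_ondemand and tegra governor hunks above only change where the device status comes from: devfreq_update_stats() now fills df->last_status, instead of each governor calling get_dev_status() itself. The target-frequency arithmetic is unchanged. As a standalone illustration of that arithmetic (plain C lifted out of the kernel for clarity; the sample numbers are invented):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the simple_ondemand target-frequency math above. */
static uint64_t dfso_target(uint64_t busy, uint64_t total, uint64_t cur_hz,
			    unsigned int upthreshold, unsigned int downdiff)
{
	uint64_t b;

	if (total == 0)			/* the governor assumes MAX here */
		return UINT64_MAX;

	/* prevent overflow, exactly as the kernel code does */
	if (busy >= (1 << 24) || total >= (1 << 24)) {
		busy >>= 7;
		total >>= 7;
	}

	b = busy * cur_hz / total;	/* frequency actually "used" */
	b *= 100;
	b /= upthreshold - downdiff / 2; /* aim safely below the threshold */
	return b;
}

int main(void)
{
	/* 60% busy at 400 MHz with the default 90/5 thresholds */
	printf("%llu Hz\n", (unsigned long long)
	       dfso_target(600, 1000, 400000000ULL, 90, 5));
	return 0;	/* prints 272727272, i.e. ~273 MHz requested */
}

With these inputs the governor asks for roughly 273 MHz, which the devfreq core then maps onto a real operating point.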
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL + bool -config ASYNC_TX_ENABLE_CHANNEL_SWITCH +config DMA_ENGINE bool +config DMA_VIRTUAL_CHANNELS + tristate + +config DMA_ACPI + def_bool y + depends on ACPI + +config DMA_OF + def_bool y + depends on OF + select DMA_ENGINE + +#devices config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA @@ -63,29 +65,15 @@ config AMBA_PL08X Platform has a PL08x DMAC device which can provide DMA engine support -config INTEL_IOATDMA - tristate "Intel I/OAT DMA support" - depends on PCI && X86 +config AMCC_PPC440SPE_ADMA + tristate "AMCC PPC440SPe ADMA support" + depends on 440SPe || 440SP select DMA_ENGINE select DMA_ENGINE_RAID - select DCA - help - Enable support for the Intel(R) I/OAT DMA engine present - in recent Intel Xeon chipsets. - - Say Y here if you have such a chipset. - - If unsure, say N. - -config INTEL_IOP_ADMA - tristate "Intel IOP ADMA support" - depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX - select DMA_ENGINE + select ARCH_HAS_ASYNC_TX_FIND_CHANNEL select ASYNC_TX_ENABLE_CHANNEL_SWITCH help - Enable support for the Intel(R) IOP Series RAID engines. - -source "drivers/dma/dw/Kconfig" + Enable support for the AMCC PPC440SPe RAID engines. config AT_HDMAC tristate "Atmel AHB DMA support" @@ -101,6 +89,89 @@ config AT_XDMAC help Support the Atmel XDMA controller. +config AXI_DMAC + tristate "Analog Devices AXI-DMAC DMA support" + depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for the Analog Devices AXI-DMAC peripheral. This DMA + controller is often used in Analog Device's reference designs for FPGA + platforms. + +config COH901318 + bool "ST-Ericsson COH901318 DMA support" + select DMA_ENGINE + depends on ARCH_U300 + help + Enable support for ST-Ericsson COH 901 318 DMA. + +config DMA_BCM2835 + tristate "BCM2835 DMA engine support" + depends on ARCH_BCM2835 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + +config DMA_JZ4740 + tristate "JZ4740 DMA support" + depends on MACH_JZ4740 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + +config DMA_JZ4780 + tristate "JZ4780 DMA support" + depends on MACH_JZ4780 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + This selects support for the DMA controller in Ingenic JZ4780 SoCs. + If you have a board based on such a SoC and wish to use DMA for + devices which can use the DMA controller, say Y or M here. + +config DMA_OMAP + tristate "OMAP DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select TI_DMA_CROSSBAR if SOC_DRA7XX + +config DMA_SA11X0 + tristate "SA-11x0 DMA support" + depends on ARCH_SA1100 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the DMA engine found on Intel StrongARM SA-1100 and + SA-1110 SoCs. This DMA engine can only be used with on-chip + devices. + +config DMA_SUN4I + tristate "Allwinner A10 DMA SoCs support" + depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I + default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I) + select DMA_ENGINE + select DMA_OF + select DMA_VIRTUAL_CHANNELS + help + Enable support for the DMA controller present in the sun4i, + sun5i and sun7i Allwinner ARM SoCs. + +config DMA_SUN6I + tristate "Allwinner A31 SoCs DMA support" + depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST + depends on RESET_CONTROLLER + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the DMA engine first found in Allwinner A31 SoCs. 
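Whichever of the controller entries above gets enabled, clients drive them through the same dmaengine API. A minimal sketch of the memcpy path follows; it is illustrative only (demo_dma_memcpy and demo_cb are invented names) and assumes src and dst are already DMA-mapped:

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void demo_cb(void *arg)
{
	complete(arg);		/* wake the submitter */
}

static int demo_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	/* grab any channel advertising the memcpy capability */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	tx->callback = demo_cb;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);		/* kick the hardware */
	wait_for_completion(&done);

	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}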
+ +config EP93XX_DMA + bool "Cirrus Logic EP93xx DMA support" + depends on ARCH_EP93XX + select DMA_ENGINE + help + Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. + config FSL_DMA tristate "Freescale Elo series DMA support" depends on FSL_SOC @@ -112,6 +183,16 @@ config FSL_DMA EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on some Txxx and Bxxx parts. +config FSL_EDMA + tristate "Freescale eDMA engine support" + depends on OF + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the Freescale eDMA engine with programmable channel + multiplexing capability for DMA request sources(slot). + This module can be found on Freescale Vybrid and LS-1 SoCs. + config FSL_RAID tristate "Freescale RAID engine Support" depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH @@ -123,153 +204,175 @@ config FSL_RAID the capability to offload memcpy, xor and pq computation for raid5/6. -source "drivers/dma/hsu/Kconfig" - -config MPC512X_DMA - tristate "Freescale MPC512x built-in DMA engine support" - depends on PPC_MPC512x || PPC_MPC831x +config IMG_MDC_DMA + tristate "IMG MDC support" + depends on MIPS || COMPILE_TEST + depends on MFD_SYSCON select DMA_ENGINE - ---help--- - Enable support for the Freescale MPC512x built-in DMA engine. - -source "drivers/dma/bestcomm/Kconfig" + select DMA_VIRTUAL_CHANNELS + help + Enable support for the IMG multi-threaded DMA controller (MDC). -config MV_XOR - bool "Marvell XOR engine support" - depends on PLAT_ORION +config IMX_DMA + tristate "i.MX DMA support" + depends on ARCH_MXC select DMA_ENGINE - select DMA_ENGINE_RAID - select ASYNC_TX_ENABLE_CHANNEL_SWITCH - ---help--- - Enable support for the Marvell XOR engine. + help + Support the i.MX DMA engine. This engine is integrated into + Freescale i.MX1/21/27 chips. -config MX3_IPU - bool "MX3x Image Processing Unit support" +config IMX_SDMA + tristate "i.MX SDMA support" depends on ARCH_MXC select DMA_ENGINE - default y help - If you plan to use the Image Processing unit in the i.MX3x, say - Y here. If unsure, select Y. + Support the i.MX SDMA engine. This engine is integrated into + Freescale i.MX25/31/35/51/53/6 chips. -config MX3_IPU_IRQS - int "Number of dynamically mapped interrupts for IPU" - depends on MX3_IPU - range 2 137 - default 4 +config IDMA64 + tristate "Intel integrated DMA 64-bit support" + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help - Out of 137 interrupt sources on i.MX31 IPU only very few are used. - To avoid bloating the irq_desc[] array we allocate a sufficient - number of IRQ slots and map them dynamically to specific sources. + Enable DMA support for Intel Low Power Subsystem such as found on + Intel Skylake PCH. -config PXA_DMA - bool "PXA DMA support" - depends on (ARCH_MMP || ARCH_PXA) +config INTEL_IOATDMA + tristate "Intel I/OAT DMA support" + depends on PCI && X86_64 select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS + select DMA_ENGINE_RAID + select DCA help - Support the DMA engine for PXA. It is also compatible with MMP PDMA - platform. The internal DMA IP of all PXA variants is supported, with - 16 to 32 channels for peripheral to memory or memory to memory - transfers. + Enable support for the Intel(R) I/OAT DMA engine present + in recent Intel Xeon chipsets. -config TXX9_DMAC - tristate "Toshiba TXx9 SoC DMA support" - depends on MACH_TX49XX || MACH_TX39XX + Say Y here if you have such a chipset. + + If unsure, say N. 
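INTEL_IOATDMA above also selects DMA_ENGINE_RAID; RAID-capable engines like it (and MV_XOR or XGENE_DMA further down) are typically consumed through the async_tx layer rather than slave DMA. A hedged sketch of that consumer path, with invented demo_* names:

#include <linux/async_tx.h>
#include <linux/completion.h>

static void demo_xor_done(void *arg)
{
	complete(arg);
}

static int demo_async_xor(struct page *dest, struct page **srcs,
			  int src_cnt, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct async_submit_ctl submit;

	/*
	 * async_xor() runs on an offload channel when a RAID-capable
	 * engine is registered and on the CPU otherwise; the callback
	 * fires on both paths, so the wait below is safe either way.
	 */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, demo_xor_done, &done, NULL);
	async_xor(dest, srcs, 0, src_cnt, len, &submit);

	wait_for_completion(&done);
	return 0;
}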
+ +config INTEL_IOP_ADMA + tristate "Intel IOP ADMA support" + depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX select DMA_ENGINE + select ASYNC_TX_ENABLE_CHANNEL_SWITCH help - Support the TXx9 SoC internal DMA controller. This can be - integrated in chips such as the Toshiba TX4927/38/39. + Enable support for the Intel(R) IOP Series RAID engines. -config TEGRA20_APB_DMA - bool "NVIDIA Tegra20 APB DMA support" - depends on ARCH_TEGRA +config INTEL_MIC_X100_DMA + tristate "Intel MIC X100 DMA Driver" + depends on 64BIT && X86 && INTEL_MIC_BUS select DMA_ENGINE help - Support for the NVIDIA Tegra20 APB DMA controller driver. The - DMA controller is having multiple DMA channel which can be - configured for different peripherals like audio, UART, SPI, - I2C etc which is in APB bus. - This DMA controller transfers data from memory to peripheral fifo - or vice versa. It does not support memory to memory data transfer. + This enables DMA support for the Intel Many Integrated Core + (MIC) family of PCIe form factor coprocessor X100 devices that + run a 64 bit Linux OS. This driver will be used by both MIC + host and card drivers. -config S3C24XX_DMAC - tristate "Samsung S3C24XX DMA support" - depends on ARCH_S3C24XX + If you are building host kernel with a MIC device or a card + kernel for a MIC device, then say M (recommended) or Y, else + say N. If unsure say N. + + More information about the Intel MIC family as well as the Linux + OS and tools for MIC to use with this driver are available from + . + +config K3_DMA + tristate "Hisilicon K3 DMA support" + depends on ARCH_HI3xxx select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Support for the Samsung S3C24XX DMA controller driver. The - DMA controller is having multiple DMA channels which can be - configured for different peripherals like audio, UART, SPI. - The DMA controller can transfer data from memory to peripheral, - periphal to memory, periphal to periphal and memory to memory. + Support the DMA engine for Hisilicon K3 platform + devices. -source "drivers/dma/sh/Kconfig" +config LPC18XX_DMAMUX + bool "NXP LPC18xx/43xx DMA MUX for PL080" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on OF && AMBA_PL08X + select MFD_SYSCON + help + Enable support for DMA on NXP LPC18xx/43xx platforms + with PL080 and multiplexed DMA request lines. -config COH901318 - bool "ST-Ericsson COH901318 DMA support" +config MMP_PDMA + bool "MMP PDMA support" + depends on (ARCH_MMP || ARCH_PXA) select DMA_ENGINE - depends on ARCH_U300 help - Enable support for ST-Ericsson COH 901 318 DMA. + Support the MMP PDMA engine for PXA and MMP platform. -config STE_DMA40 - bool "ST-Ericsson DMA40 support" - depends on ARCH_U8500 +config MMP_TDMA + bool "MMP Two-Channel DMA support" + depends on ARCH_MMP select DMA_ENGINE + select MMP_SRAM help - Support for ST-Ericsson DMA40 controller + Support the MMP Two-Channel DMA engine. + This engine used for MMP Audio DMA and pxa910 SQU. + It needs sram driver under mach-mmp. -config AMCC_PPC440SPE_ADMA - tristate "AMCC PPC440SPe ADMA support" - depends on 440SPe || 440SP +config MOXART_DMA + tristate "MOXART DMA support" + depends on ARCH_MOXART select DMA_ENGINE - select DMA_ENGINE_RAID - select ARCH_HAS_ASYNC_TX_FIND_CHANNEL - select ASYNC_TX_ENABLE_CHANNEL_SWITCH + select DMA_OF + select DMA_VIRTUAL_CHANNELS help - Enable support for the AMCC PPC440SPe RAID engines. + Enable support for the MOXA ART SoC DMA controller. + + Say Y here if you enabled MMP ADMA, otherwise say N. 
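LPC18XX_DMAMUX above selects MFD_SYSCON because the request-line mux is just a field in a system-configuration register. The sketch below shows the general syscon/regmap pattern; the register offset and field layout are invented for illustration (the real ones live in lpc18xx-dmamux.c):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* invented layout: one 2-bit mux field per DMA request line */
#define DEMO_DMAMUX_REG		0x11c			/* assumed offset */
#define DEMO_MUX_MASK(line)	(0x3 << ((line) * 2))
#define DEMO_MUX_VAL(line, p)	((p) << ((line) * 2))

static int demo_mux_route(struct device_node *np, unsigned int line,
			  unsigned int periph)
{
	struct regmap *syscon;

	syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
	if (IS_ERR(syscon))
		return PTR_ERR(syscon);

	/* read-modify-write only the field for this request line */
	return regmap_update_bits(syscon, DEMO_DMAMUX_REG,
				  DEMO_MUX_MASK(line),
				  DEMO_MUX_VAL(line, periph));
}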
-config TIMB_DMA - tristate "Timberdale FPGA DMA support" - depends on MFD_TIMBERDALE +config MPC512X_DMA + tristate "Freescale MPC512x built-in DMA engine support" + depends on PPC_MPC512x || PPC_MPC831x select DMA_ENGINE - help - Enable support for the Timberdale FPGA DMA engine. + ---help--- + Enable support for the Freescale MPC512x built-in DMA engine. -config SIRF_DMA - tristate "CSR SiRFprimaII/SiRFmarco DMA support" - depends on ARCH_SIRF +config MV_XOR + bool "Marvell XOR engine support" + depends on PLAT_ORION + select DMA_ENGINE + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + ---help--- + Enable support for the Marvell XOR engine. + +config MXS_DMA + bool "MXS DMA support" + depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q + select STMP_DEVICE select DMA_ENGINE help - Enable support for the CSR SiRFprimaII DMA engine. + Support the MXS DMA engine. This engine including APBH-DMA + and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips. -config TI_EDMA - bool "TI EDMA support" - depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE +config MX3_IPU + bool "MX3x Image Processing Unit support" + depends on ARCH_MXC select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - select TI_PRIV_EDMA - default n + default y help - Enable support for the TI EDMA controller. This DMA - engine is found on TI DaVinci and AM33xx parts. - -config TI_DMA_CROSSBAR - bool + If you plan to use the Image Processing unit in the i.MX3x, say + Y here. If unsure, select Y. -config ARCH_HAS_ASYNC_TX_FIND_CHANNEL - bool +config MX3_IPU_IRQS + int "Number of dynamically mapped interrupts for IPU" + depends on MX3_IPU + range 2 137 + default 4 + help + Out of 137 interrupt sources on i.MX31 IPU only very few are used. + To avoid bloating the irq_desc[] array we allocate a sufficient + number of IRQ slots and map them dynamically to specific sources. -config PL330_DMA - tristate "DMA API Driver for PL330" +config NBPFAXI_DMA + tristate "Renesas Type-AXI NBPF DMA support" select DMA_ENGINE - depends on ARM_AMBA + depends on ARM || COMPILE_TEST help - Select if your platform has one or more PL330 DMACs. - You need to provide platform specific settings via - platform_data for a dma-pl330 device. + Support for "Type-AXI" NBPF DMA IPs from Renesas config PCH_DMA tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" @@ -285,72 +388,87 @@ config PCH_DMA ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. -config IMX_SDMA - tristate "i.MX SDMA support" - depends on ARCH_MXC +config PL330_DMA + tristate "DMA API Driver for PL330" select DMA_ENGINE + depends on ARM_AMBA help - Support the i.MX SDMA engine. This engine is integrated into - Freescale i.MX25/31/35/51/53/6 chips. + Select if your platform has one or more PL330 DMACs. + You need to provide platform specific settings via + platform_data for a dma-pl330 device. -config IMX_DMA - tristate "i.MX DMA support" - depends on ARCH_MXC +config PXA_DMA + bool "PXA DMA support" + depends on (ARCH_MMP || ARCH_PXA) select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help - Support the i.MX DMA engine. This engine is integrated into - Freescale i.MX1/21/27 chips. + Support the DMA engine for PXA. It is also compatible with MMP PDMA + platform. The internal DMA IP of all PXA variants is supported, with + 16 to 32 channels for peripheral to memory or memory to memory + transfers. 
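PXA_DMA, like most of the new entries, selects DMA_VIRTUAL_CHANNELS: the virt-dma library factors out cookie assignment and the submitted/issued/completed descriptor lists, so a driver mostly supplies a desc_free hook. A rough provider-side sketch with invented demo_* names (note virt-dma.h is a private header inside drivers/dma):

#include <linux/slab.h>
#include "virt-dma.h"		/* private to drivers/dma */

struct demo_desc {
	struct virt_dma_desc vd;	/* embeds the generic descriptor */
	/* ... hardware-specific fields would follow ... */
};

struct demo_chan {
	struct virt_dma_chan vc;	/* embeds the generic channel */
};

static void demo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct demo_desc, vd));
}

static void demo_chan_init(struct demo_chan *c, struct dma_device *dd)
{
	c->vc.desc_free = demo_desc_free;
	vchan_init(&c->vc, dd);		/* links the channel into dd */
}

static struct dma_async_tx_descriptor *
demo_prep(struct demo_chan *c, unsigned long flags)
{
	struct demo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	/* ... fill in the hardware descriptor here ... */
	return vchan_tx_prep(&c->vc, &d->vd, flags);	/* cookie handling */
}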
-config MXS_DMA - bool "MXS DMA support" - depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q - select STMP_DEVICE +config QCOM_BAM_DMA + tristate "QCOM BAM DMA support" + depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + ---help--- + Enable support for the QCOM BAM DMA controller. This controller + provides DMA capabilities for a variety of on-chip devices. + +config SIRF_DMA + tristate "CSR SiRFprimaII/SiRFmarco DMA support" + depends on ARCH_SIRF select DMA_ENGINE help - Support the MXS DMA engine. This engine including APBH-DMA - and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips. + Enable support for the CSR SiRFprimaII DMA engine. -config EP93XX_DMA - bool "Cirrus Logic EP93xx DMA support" - depends on ARCH_EP93XX +config STE_DMA40 + bool "ST-Ericsson DMA40 support" + depends on ARCH_U8500 select DMA_ENGINE help - Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. + Support for ST-Ericsson DMA40 controller -config DMA_SA11X0 - tristate "SA-11x0 DMA support" - depends on ARCH_SA1100 +config S3C24XX_DMAC + tristate "Samsung S3C24XX DMA support" + depends on ARCH_S3C24XX select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Support the DMA engine found on Intel StrongARM SA-1100 and - SA-1110 SoCs. This DMA engine can only be used with on-chip - devices. + Support for the Samsung S3C24XX DMA controller driver. The + DMA controller is having multiple DMA channels which can be + configured for different peripherals like audio, UART, SPI. + The DMA controller can transfer data from memory to peripheral, + periphal to memory, periphal to periphal and memory to memory. -config MMP_TDMA - bool "MMP Two-Channel DMA support" - depends on ARCH_MMP +config TXX9_DMAC + tristate "Toshiba TXx9 SoC DMA support" + depends on MACH_TX49XX || MACH_TX39XX select DMA_ENGINE - select MMP_SRAM help - Support the MMP Two-Channel DMA engine. - This engine used for MMP Audio DMA and pxa910 SQU. - It needs sram driver under mach-mmp. - - Say Y here if you enabled MMP ADMA, otherwise say N. + Support the TXx9 SoC internal DMA controller. This can be + integrated in chips such as the Toshiba TX4927/38/39. -config DMA_OMAP - tristate "OMAP DMA support" - depends on ARCH_OMAP +config TEGRA20_APB_DMA + bool "NVIDIA Tegra20 APB DMA support" + depends on ARCH_TEGRA select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - select TI_DMA_CROSSBAR if SOC_DRA7XX + help + Support for the NVIDIA Tegra20 APB DMA controller driver. The + DMA controller is having multiple DMA channel which can be + configured for different peripherals like audio, UART, SPI, + I2C etc which is in APB bus. + This DMA controller transfers data from memory to peripheral fifo + or vice versa. It does not support memory to memory data transfer. -config DMA_BCM2835 - tristate "BCM2835 DMA engine support" - depends on ARCH_BCM2835 +config TIMB_DMA + tristate "Timberdale FPGA DMA support" + depends on MFD_TIMBERDALE select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS + help + Enable support for the Timberdale FPGA DMA engine. config TI_CPPI41 tristate "AM33xx CPPI41 DMA support" @@ -360,56 +478,28 @@ config TI_CPPI41 The Communications Port Programming Interface (CPPI) 4.1 DMA engine is currently used by the USB driver on AM335x platforms. -config MMP_PDMA - bool "MMP PDMA support" - depends on (ARCH_MMP || ARCH_PXA) - select DMA_ENGINE - help - Support the MMP PDMA engine for PXA and MMP platform. 
- -config DMA_JZ4740 - tristate "JZ4740 DMA support" - depends on MACH_JZ4740 - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - -config DMA_JZ4780 - tristate "JZ4780 DMA support" - depends on MACH_JZ4780 - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help - This selects support for the DMA controller in Ingenic JZ4780 SoCs. - If you have a board based on such a SoC and wish to use DMA for - devices which can use the DMA controller, say Y or M here. +config TI_DMA_CROSSBAR + bool -config K3_DMA - tristate "Hisilicon K3 DMA support" - depends on ARCH_HI3xxx +config TI_EDMA + bool "TI EDMA support" + depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + select TI_PRIV_EDMA + default n help - Support the DMA engine for Hisilicon K3 platform - devices. + Enable support for the TI EDMA controller. This DMA + engine is found on TI DaVinci and AM33xx parts. -config MOXART_DMA - tristate "MOXART DMA support" - depends on ARCH_MOXART - select DMA_ENGINE - select DMA_OF - select DMA_VIRTUAL_CHANNELS - help - Enable support for the MOXA ART SoC DMA controller. - -config FSL_EDMA - tristate "Freescale eDMA engine support" - depends on OF +config XGENE_DMA + tristate "APM X-Gene DMA support" + depends on ARCH_XGENE || COMPILE_TEST select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH help - Support the Freescale eDMA engine with programmable channel - multiplexing capability for DMA request sources(slot). - This module can be found on Freescale Vybrid and LS-1 SoCs. + Enable support for the APM X-Gene SoC DMA engine. config XILINX_VDMA tristate "Xilinx AXI VDMA Engine" @@ -425,55 +515,25 @@ config XILINX_VDMA channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. -config DMA_SUN6I - tristate "Allwinner A31 SoCs DMA support" - depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST - depends on RESET_CONTROLLER - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help - Support for the DMA engine first found in Allwinner A31 SoCs. - -config NBPFAXI_DMA - tristate "Renesas Type-AXI NBPF DMA support" - select DMA_ENGINE - depends on ARM || COMPILE_TEST - help - Support for "Type-AXI" NBPF DMA IPs from Renesas - -config IMG_MDC_DMA - tristate "IMG MDC support" - depends on MIPS || COMPILE_TEST - depends on MFD_SYSCON +config ZX_DMA + tristate "ZTE ZX296702 DMA support" + depends on ARCH_ZX select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Enable support for the IMG multi-threaded DMA controller (MDC). + Support the DMA engine for ZTE ZX296702 platform devices. -config XGENE_DMA - tristate "APM X-Gene DMA support" - depends on ARCH_XGENE || COMPILE_TEST - select DMA_ENGINE - select DMA_ENGINE_RAID - select ASYNC_TX_ENABLE_CHANNEL_SWITCH - help - Enable support for the APM X-Gene SoC DMA engine. 
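The core symbols regrouped below include DMA_OF, which builds the device-tree lookup glue (of-dma.c) used later in this patch by pl08x_of_probe(). For a controller whose binding carries a single channel-id cell, the stock translator is enough; a minimal hypothetical registration (demo_register_of is an invented name):

#include <linux/of_dma.h>

/*
 * Hook a registered dma_device into the DT lookup so clients can
 * reference it as e.g. dmas = <&demo_dmac 3>;. Bindings with more
 * cells (like pl08x below, which carries a signal number plus a bus
 * mask) install a custom xlate callback instead.
 */
static int demo_register_of(struct device_node *np, struct dma_device *dd)
{
	return of_dma_controller_register(np, of_dma_xlate_by_chan_id, dd);
}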
-config DMA_ENGINE - bool +# driver files +source "drivers/dma/bestcomm/Kconfig" -config DMA_VIRTUAL_CHANNELS - tristate +source "drivers/dma/dw/Kconfig" -config DMA_ACPI - def_bool y - depends on ACPI +source "drivers/dma/hsu/Kconfig" -config DMA_OF - def_bool y - depends on OF - select DMA_ENGINE +source "drivers/dma/sh/Kconfig" +# clients comment "DMA Clients" depends on DMA_ENGINE @@ -498,13 +558,4 @@ config DMATEST config DMA_ENGINE_RAID bool -config QCOM_BAM_DMA - tristate "QCOM BAM DMA support" - depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - ---help--- - Enable support for the QCOM BAM DMA controller. This controller - provides DMA capabilities for a variety of on-chip devices. - endif diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 6a4d6f282..7711a7180 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,58 +1,69 @@ +#dmaengine debug flags subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG +#core obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_DMA_ACPI) += acpi-dma.o obj-$(CONFIG_DMA_OF) += of-dma.o +#dmatest obj-$(CONFIG_DMATEST) += dmatest.o -obj-$(CONFIG_INTEL_IOATDMA) += ioat/ -obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o -obj-$(CONFIG_FSL_DMA) += fsldma.o -obj-$(CONFIG_HSU_DMA) += hsu/ -obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o -obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ -obj-$(CONFIG_MV_XOR) += mv_xor.o -obj-$(CONFIG_DW_DMAC_CORE) += dw/ + +#devices +obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o +obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_XDMAC) += at_xdmac.o -obj-$(CONFIG_MX3_IPU) += ipu/ -obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o -obj-$(CONFIG_RENESAS_DMA) += sh/ +obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o -obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ -obj-$(CONFIG_IMX_SDMA) += imx-sdma.o +obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o +obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o +obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o +obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o +obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o +obj-$(CONFIG_DW_DMAC_CORE) += dw/ +obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o +obj-$(CONFIG_FSL_DMA) += fsldma.o +obj-$(CONFIG_FSL_EDMA) += fsl-edma.o +obj-$(CONFIG_FSL_RAID) += fsl_raid.o +obj-$(CONFIG_HSU_DMA) += hsu/ +obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o +obj-$(CONFIG_IMX_SDMA) += imx-sdma.o +obj-$(CONFIG_IDMA64) += idma64.o +obj-$(CONFIG_INTEL_IOATDMA) += ioat/ +obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o +obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o +obj-$(CONFIG_K3_DMA) += k3dma.o +obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o +obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o +obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o +obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o +obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o +obj-$(CONFIG_MX3_IPU) += ipu/ +obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o +obj-$(CONFIG_PCH_DMA) += pch_dma.o +obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o -obj-$(CONFIG_TIMB_DMA) += timb_dma.o +obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o +obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o -obj-$(CONFIG_TI_EDMA) += edma.o 
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o -obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o -obj-$(CONFIG_PL330_DMA) += pl330.o -obj-$(CONFIG_PCH_DMA) += pch_dma.o -obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o -obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o -obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o -obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o -obj-$(CONFIG_DMA_OMAP) += omap-dma.o -obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o -obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o -obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o -obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o -obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o +obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o +obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o +obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_TI_CPPI41) += cppi41.o -obj-$(CONFIG_K3_DMA) += k3dma.o -obj-$(CONFIG_MOXART_DMA) += moxart-dma.o -obj-$(CONFIG_FSL_RAID) += fsl_raid.o -obj-$(CONFIG_FSL_EDMA) += fsl-edma.o -obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o -obj-y += xilinx/ -obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o -obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o -obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o -obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o +obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o +obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o +obj-$(CONFIG_ZX_DMA) += zx296702_dma.o + +obj-y += xilinx/ diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 5de3cf453..9b42c0588 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -83,6 +83,8 @@ #include #include #include +#include +#include #include #include #include @@ -2030,10 +2032,188 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) } #endif +#ifdef CONFIG_OF +static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, + u32 id) +{ + struct pl08x_dma_chan *chan; + + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { + if (chan->signal == id) + return &chan->vc.chan; + } + + return NULL; +} + +static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct pl08x_driver_data *pl08x = ofdma->of_dma_data; + struct pl08x_channel_data *data; + struct pl08x_dma_chan *chan; + struct dma_chan *dma_chan; + + if (!pl08x) + return NULL; + + if (dma_spec->args_count != 2) + return NULL; + + dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); + if (dma_chan) + return dma_get_slave_channel(dma_chan); + + chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data), + GFP_KERNEL); + if (!chan) + return NULL; + + data = (void *)&chan[1]; + data->bus_id = "(none)"; + data->periph_buses = dma_spec->args[1]; + + chan->cd = data; + chan->host = pl08x; + chan->slave = true; + chan->name = data->bus_id; + chan->state = PL08X_CHAN_IDLE; + chan->signal = dma_spec->args[0]; + chan->vc.desc_free = pl08x_desc_free; + + vchan_init(&chan->vc, &pl08x->slave); + + return dma_get_slave_channel(&chan->vc.chan); +} + +static int pl08x_of_probe(struct amba_device *adev, + struct pl08x_driver_data *pl08x, + struct device_node *np) +{ + struct pl08x_platform_data *pd; + u32 cctl_memcpy = 0; + u32 val; + int ret; + + pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + /* Eligible bus masters for fetching LLIs */ + if (of_property_read_bool(np, "lli-bus-interface-ahb1")) + pd->lli_buses |= PL08X_AHB1; + if (of_property_read_bool(np, "lli-bus-interface-ahb2")) + pd->lli_buses |= PL08X_AHB2; + if (!pd->lli_buses) { + 
dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n"); + pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2; + } + + /* Eligible bus masters for memory access */ + if (of_property_read_bool(np, "mem-bus-interface-ahb1")) + pd->mem_buses |= PL08X_AHB1; + if (of_property_read_bool(np, "mem-bus-interface-ahb2")) + pd->mem_buses |= PL08X_AHB2; + if (!pd->mem_buses) { + dev_info(&adev->dev, "no bus masters for memory stated, assume all\n"); + pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2; + } + + /* Parse the memcpy channel properties */ + ret = of_property_read_u32(np, "memcpy-burst-size", &val); + if (ret) { + dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n"); + val = 1; + } + switch (val) { + default: + dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); + /* Fall through */ + case 1: + cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 4: + cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 8: + cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 16: + cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 32: + cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 64: + cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 128: + cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case 256: + cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + } + + ret = of_property_read_u32(np, "memcpy-bus-width", &val); + if (ret) { + dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n"); + val = 8; + } + switch (val) { + default: + dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); + /* Fall through */ + case 8: + cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + case 16: + cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + case 32: + cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + } + + /* This is currently the only thing making sense */ + cctl_memcpy |= PL080_CONTROL_PROT_SYS; + + /* Set up memcpy channel */ + pd->memcpy_channel.bus_id = "memcpy"; + pd->memcpy_channel.cctl_memcpy = cctl_memcpy; + /* Use the buses that can access memory, obviously */ + pd->memcpy_channel.periph_buses = pd->mem_buses; + + pl08x->pd = pd; + + return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, + pl08x); +} +#else +static inline int pl08x_of_probe(struct amba_device *adev, + struct pl08x_driver_data *pl08x, + struct device_node *np) +{ + return -EINVAL; +} +#endif + static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) { struct pl08x_driver_data *pl08x; const struct vendor_data *vd = id->data; + struct device_node *np = adev->dev.of_node; u32 tsfr_size; int ret = 0; int i; @@ -2093,9 +2273,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) /* Get the platform data */ pl08x->pd = 
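/*
 * (Editorial sketch, not part of the original patch: the burst-size
 * switch in pl08x_of_probe() above maps the power-of-two property
 * values onto the PL080 encoding 1->0, 4->1, 8->2, ... 256->7.
 * Assuming the PL080_BSIZE_* constants keep those consecutive values,
 * the same mapping could be written arithmetically as:
 *
 *	u32 enc = (val == 1) ? 0 : ffs(val) - 2;
 *	cctl_memcpy |= enc << PL080_CONTROL_SB_SIZE_SHIFT |
 *		       enc << PL080_CONTROL_DB_SIZE_SHIFT;
 *
 * though the explicit switch is arguably clearer and rejects bad
 * values in one place.)
 */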
dev_get_platdata(&adev->dev);
 	if (!pl08x->pd) {
-		dev_err(&adev->dev, "no platform data supplied\n");
-		ret = -EINVAL;
-		goto out_no_platdata;
+		if (np) {
+			ret = pl08x_of_probe(adev, pl08x, np);
+			if (ret)
+				goto out_no_platdata;
+		} else {
+			dev_err(&adev->dev, "no platform data supplied\n");
+			ret = -EINVAL;
+			goto out_no_platdata;
+		}
 	}
 
 	/* Assign useful pointers to the driver state */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d3629b748..58d406230 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -448,6 +448,7 @@ static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
@@ -456,6 +457,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset = false;
+	}
+
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
 	/* move myself to free_list */
@@ -717,14 +725,14 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 	size_t		len = 0;
 	int		i;
 
+	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+		return NULL;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, xt->src_start, xt->dst_start, xt->numf,
 		xt->frame_size, flags);
 
-	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
-		return NULL;
-
 	/*
 	 * The controller can only "skip" X bytes every Y bytes, so we
 	 * need to make sure we are given a template that fit that
@@ -873,6 +881,93 @@ err_desc_get:
 	return NULL;
 }
 
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+		    size_t len, unsigned long flags)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	struct at_desc		*desc = NULL;
+	size_t			xfer_count;
+	u32			ctrla;
+	u32			ctrlb;
+
+	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+		 dest, value, len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+			__func__);
+		return NULL;
+	}
+
+	xfer_count = len >> 2;
+	if (xfer_count > ATC_BTSIZE_MAX) {
+		dev_err(chan2dev(chan), "%s: buffer is too big\n",
+			__func__);
+		return NULL;
+	}
+
+	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_FIXED
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	ctrla = ATC_SRC_WIDTH(2) |
+		ATC_DST_WIDTH(2);
+
+	desc = atc_desc_get(atchan);
+	if (!desc) {
+		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+			__func__);
+		return NULL;
+	}
+
+	desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+					    &desc->memset_paddr);
+	if (!desc->memset_vaddr) {
+		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+			__func__);
+		goto err_put_desc;
+	}
+
+	*desc->memset_vaddr =
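/*
 * (Illustrative note, not part of the original patch: at_hdmac has no
 * dedicated fill engine, so memset is implemented as a MEM2MEM copy
 * from a fixed, non-incrementing 4-byte source: the pattern word is
 * placed in a coherent buffer from memset_pool and
 * ATC_SRC_ADDR_MODE_FIXED replays it across the destination. A client
 * would reach this through the DMA_MEMSET capability, roughly:
 *
 *	tx = dmaengine_prep_dma_memset(chan, dma_dest, 0, len, 0);
 *
 * assuming this kernel generation's dmaengine_prep_dma_memset()
 * wrapper; dma_dest and len are hypothetical.)
 */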
value; + desc->memset = true; + + desc->lli.saddr = desc->memset_paddr; + desc->lli.daddr = dest; + desc->lli.ctrla = ctrla | xfer_count; + desc->lli.ctrlb = ctrlb; + + desc->txd.cookie = -EBUSY; + desc->len = len; + desc->total_len = len; + + /* set end-of-link on the descriptor */ + set_desc_eol(desc); + + desc->txd.flags = flags; + + return &desc->txd; + +err_put_desc: + atc_desc_put(atchan, desc); + return NULL; +} + /** * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction @@ -1755,6 +1850,8 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask); dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); + dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); + dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); @@ -1818,7 +1915,16 @@ static int __init at_dma_probe(struct platform_device *pdev) if (!atdma->dma_desc_pool) { dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); err = -ENOMEM; - goto err_pool_create; + goto err_desc_pool_create; + } + + /* create a pool of consistent memory blocks for memset blocks */ + atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool", + &pdev->dev, sizeof(int), 4, 0); + if (!atdma->memset_pool) { + dev_err(&pdev->dev, "No memory for memset dma pool\n"); + err = -ENOMEM; + goto err_memset_pool_create; } /* clear any pending interrupt */ @@ -1864,6 +1970,11 @@ static int __init at_dma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; + if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { + atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; + atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; + } + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; /* controller can do slave DMA: can trigger cyclic transfers */ @@ -1884,8 +1995,9 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_writel(atdma, EN, AT_DMA_ENABLE); - dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", + dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n", dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", + dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "", dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? 
"sg-cpy " : "", plat_dat->nr_channels); @@ -1910,8 +2022,10 @@ static int __init at_dma_probe(struct platform_device *pdev) err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); + dma_pool_destroy(atdma->memset_pool); +err_memset_pool_create: dma_pool_destroy(atdma->dma_desc_pool); -err_pool_create: +err_desc_pool_create: free_irq(platform_get_irq(pdev, 0), atdma); err_irq: clk_disable_unprepare(atdma->clk); @@ -1936,6 +2050,7 @@ static int at_dma_remove(struct platform_device *pdev) at_dma_off(atdma); dma_async_device_unregister(&atdma->dma_common); + dma_pool_destroy(atdma->memset_pool); dma_pool_destroy(atdma->dma_desc_pool); free_irq(platform_get_irq(pdev, 0), atdma); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 7f5a08230..c3bebbe89 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -200,6 +200,11 @@ struct at_desc { size_t boundary; size_t dst_hole; size_t src_hole; + + /* Memset temporary buffer */ + bool memset; + dma_addr_t memset_paddr; + int *memset_vaddr; }; static inline struct at_desc * @@ -330,6 +335,7 @@ struct at_dma { u8 all_chan_mask; struct dma_pool *dma_desc_pool; + struct dma_pool *memset_pool; /* AT THE END channels table */ struct at_dma_chan chan[0]; }; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index da7917a2e..dd24375b7 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -634,12 +634,12 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { - struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); - struct at_xdmac_desc *first = NULL, *prev = NULL; - struct scatterlist *sg; - int i; - unsigned int xfer_size = 0; - unsigned long irqflags; + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *first = NULL, *prev = NULL; + struct scatterlist *sg; + int i; + unsigned int xfer_size = 0; + unsigned long irqflags; struct dma_async_tx_descriptor *ret = NULL; if (!sgl) @@ -1141,7 +1141,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, * SAMA5D4x), so we can use the same interface for source and dest, * that solves the fact we don't know the direction. */ - u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM + u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM | AT_XDMAC_CC_SAM_INCREMENTED_AM | AT_XDMAC_CC_DIF(0) | AT_XDMAC_CC_SIF(0) @@ -1209,6 +1209,168 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, return &desc->tx_dma_desc; } +static struct dma_async_tx_descriptor * +at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, int value, + unsigned long flags) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac_desc *desc, *pdesc = NULL, + *ppdesc = NULL, *first = NULL; + struct scatterlist *sg, *psg = NULL, *ppsg = NULL; + size_t stride = 0, pstride = 0, len = 0; + int i; + + if (!sgl) + return NULL; + + dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n", + __func__, sg_len, value, flags); + + /* Prepare descriptors. 
 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
+			__func__, sg_dma_address(sg), sg_dma_len(sg),
+			value, flags);
+		desc = at_xdmac_memset_create_desc(chan, atchan,
+						   sg_dma_address(sg),
+						   sg_dma_len(sg),
+						   value);
+		if (!desc && first)
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+
+		if (!first)
+			first = desc;
+
+		/* Update our strides */
+		pstride = stride;
+		if (psg)
+			stride = sg_dma_address(sg) -
+				(sg_dma_address(psg) + sg_dma_len(psg));
+
+		/*
+		 * The scatterlist API gives us only the address and
+		 * length of each element.
+		 *
+		 * Unfortunately, we don't have the stride, which we
+		 * will need to compute.
+		 *
+		 * That makes us end up in a situation like this one:
+		 *    len    stride    len    stride    len
+		 * +-------+        +-------+        +-------+
+		 * |  N-2  |        |  N-1  |        |   N   |
+		 * +-------+        +-------+        +-------+
+		 *
+		 * We need all these three elements (N-2, N-1 and N)
+		 * to actually take the decision on whether we need to
+		 * queue N-1 or reuse N-2.
+		 *
+		 * We will only consider N if it is the last element.
+		 */
+		if (ppdesc && pdesc) {
+			if ((stride == pstride) &&
+			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
+				dev_dbg(chan2dev(chan),
+					"%s: desc 0x%p can be merged with desc 0x%p\n",
+					__func__, pdesc, ppdesc);
+
+				/*
+				 * Increment the block count of the
+				 * N-2 descriptor
+				 */
+				at_xdmac_increment_block_count(chan, ppdesc);
+				ppdesc->lld.mbr_dus = stride;
+
+				/*
+				 * Put back the N-1 descriptor in the
+				 * free descriptor list
+				 */
+				list_add_tail(&pdesc->desc_node,
+					      &atchan->free_descs_list);
+
+				/*
+				 * Make our N-1 descriptor pointer
+				 * point to the N-2 since they were
+				 * actually merged.
+				 */
+				pdesc = ppdesc;
+
+			/*
+			 * Rule out the case where we don't have
+			 * pstride computed yet (our second sg
+			 * element).
+			 *
+			 * We also want to catch the case where there
+			 * would be a negative stride.
+			 */
+			} else if (pstride ||
+				   sg_dma_address(sg) < sg_dma_address(psg)) {
+				/*
+				 * Queue the N-1 descriptor after the
+				 * N-2
+				 */
+				at_xdmac_queue_desc(chan, ppdesc, pdesc);
+
+				/*
+				 * Add the N-1 descriptor to the list
+				 * of the descriptors used for this
+				 * transfer
+				 */
+				list_add_tail(&desc->desc_node,
+					      &first->descs_list);
+				dev_dbg(chan2dev(chan),
+					"%s: add desc 0x%p to descs_list 0x%p\n",
+					__func__, desc, first);
+			}
+		}
+
+		/*
+		 * If we are the last element, just see if we have the
+		 * same size as the previous element.
+		 *
+		 * If so, we can merge it with the previous descriptor
+		 * since we don't care about the stride anymore.
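		 *
		 * (Illustrative example, not part of the original patch:
		 * three 4 KiB segments at 0x1000, 0x3000 and 0x5000 all
		 * have the same length and the same 0x1000 stride, so
		 * this loop folds them into the single N-2 descriptor,
		 * incrementing its block count twice and setting its
		 * mbr_dus stride to 0x1000, instead of queueing three
		 * linked descriptors.)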
+		 */
+		if ((i == (sg_len - 1)) &&
+		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: desc 0x%p can be merged with desc 0x%p\n",
+				__func__, desc, pdesc);
+
+			/*
+			 * Increment the block count of the N-1
+			 * descriptor
+			 */
+			at_xdmac_increment_block_count(chan, pdesc);
+			pdesc->lld.mbr_dus = stride;
+
+			/*
+			 * Put back the N descriptor in the free
+			 * descriptor list
+			 */
+			list_add_tail(&desc->desc_node,
+				      &atchan->free_descs_list);
+		}
+
+		/* Update our descriptors */
+		ppdesc = pdesc;
+		pdesc = desc;
+
+		/* Update our scatter pointers */
+		ppsg = psg;
+		psg = sg;
+
+		len += sg_dma_len(sg);
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
 static enum dma_status
 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
@@ -1742,6 +1904,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
 	/*
 	 * Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1757,6 +1920,7 @@
 	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
 	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
+	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
 	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
 	atxdmac->dma.device_config = at_xdmac_device_config;
 	atxdmac->dma.device_pause = at_xdmac_device_pause;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index fd22dd369..c340ca9bd 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2730,7 +2730,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	 * This controller can only access address at even 32bit boundaries,
 	 * i.e. 2^2
 	 */
-	base->dma_memcpy.copy_align = 2;
+	base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
 
 	err = dma_async_device_register(&base->dma_memcpy);
 	if (err)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
new file mode 100644
index 000000000..5b2395e7e
--- /dev/null
+++ b/drivers/dma/dma-axi-dmac.c
@@ -0,0 +1,691 @@
+/*
+ * Driver for the Analog Devices AXI-DMAC core
+ *
+ * Copyright 2013-2015 Analog Devices Inc.
+ * Author: Lars-Peter Clausen
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
+ * various instantiation parameters which decide the exact feature set
+ * supported by the core.
+ *
+ * Each channel of the core has a source interface and a destination interface.
+ * The number of channels and the type of the channel interfaces are selected
+ * at configuration time. An interface can either be connected to a central
+ * memory interconnect, which allows access to system memory, or it can be
+ * connected to a dedicated bus which is directly connected to a data port on
+ * a peripheral.
+ * Given that those are configuration options of the core that are selected when + * it is instantiated this means that they can not be changed by software at + * runtime. By extension this means that each channel is uni-directional. It can + * either be device to memory or memory to device, but not both. Also since the + * device side is a dedicated data bus only connected to a single peripheral + * there is no address than can or needs to be configured for the device side. + */ + +#define AXI_DMAC_REG_IRQ_MASK 0x80 +#define AXI_DMAC_REG_IRQ_PENDING 0x84 +#define AXI_DMAC_REG_IRQ_SOURCE 0x88 + +#define AXI_DMAC_REG_CTRL 0x400 +#define AXI_DMAC_REG_TRANSFER_ID 0x404 +#define AXI_DMAC_REG_START_TRANSFER 0x408 +#define AXI_DMAC_REG_FLAGS 0x40c +#define AXI_DMAC_REG_DEST_ADDRESS 0x410 +#define AXI_DMAC_REG_SRC_ADDRESS 0x414 +#define AXI_DMAC_REG_X_LENGTH 0x418 +#define AXI_DMAC_REG_Y_LENGTH 0x41c +#define AXI_DMAC_REG_DEST_STRIDE 0x420 +#define AXI_DMAC_REG_SRC_STRIDE 0x424 +#define AXI_DMAC_REG_TRANSFER_DONE 0x428 +#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c +#define AXI_DMAC_REG_STATUS 0x430 +#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 +#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 + +#define AXI_DMAC_CTRL_ENABLE BIT(0) +#define AXI_DMAC_CTRL_PAUSE BIT(1) + +#define AXI_DMAC_IRQ_SOT BIT(0) +#define AXI_DMAC_IRQ_EOT BIT(1) + +#define AXI_DMAC_FLAG_CYCLIC BIT(0) + +struct axi_dmac_sg { + dma_addr_t src_addr; + dma_addr_t dest_addr; + unsigned int x_len; + unsigned int y_len; + unsigned int dest_stride; + unsigned int src_stride; + unsigned int id; +}; + +struct axi_dmac_desc { + struct virt_dma_desc vdesc; + bool cyclic; + + unsigned int num_submitted; + unsigned int num_completed; + unsigned int num_sgs; + struct axi_dmac_sg sg[]; +}; + +struct axi_dmac_chan { + struct virt_dma_chan vchan; + + struct axi_dmac_desc *next_desc; + struct list_head active_descs; + enum dma_transfer_direction direction; + + unsigned int src_width; + unsigned int dest_width; + unsigned int src_type; + unsigned int dest_type; + + unsigned int max_length; + unsigned int align_mask; + + bool hw_cyclic; + bool hw_2d; +}; + +struct axi_dmac { + void __iomem *base; + int irq; + + struct clk *clk; + + struct dma_device dma_dev; + struct axi_dmac_chan chan; + + struct device_dma_parameters dma_parms; +}; + +static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan) +{ + return container_of(chan->vchan.chan.device, struct axi_dmac, + dma_dev); +} + +static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c) +{ + return container_of(c, struct axi_dmac_chan, vchan.chan); +} + +static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct axi_dmac_desc, vdesc); +} + +static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg, + unsigned int val) +{ + writel(val, axi_dmac->base + reg); +} + +static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg) +{ + return readl(axi_dmac->base + reg); +} + +static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan) +{ + return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM; +} + +static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan) +{ + return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM; +} + +static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len) +{ + if (len == 0 || len > chan->max_length) + return false; + if ((len & chan->align_mask) != 0) /* Not aligned */ + return false; + return true; +} + +static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t 
addr) +{ + if ((addr & chan->align_mask) != 0) /* Not aligned */ + return false; + return true; +} + +static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) +{ + struct axi_dmac *dmac = chan_to_axi_dmac(chan); + struct virt_dma_desc *vdesc; + struct axi_dmac_desc *desc; + struct axi_dmac_sg *sg; + unsigned int flags = 0; + unsigned int val; + + val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER); + if (val) /* Queue is full, wait for the next SOT IRQ */ + return; + + desc = chan->next_desc; + + if (!desc) { + vdesc = vchan_next_desc(&chan->vchan); + if (!vdesc) + return; + list_move_tail(&vdesc->node, &chan->active_descs); + desc = to_axi_dmac_desc(vdesc); + } + sg = &desc->sg[desc->num_submitted]; + + desc->num_submitted++; + if (desc->num_submitted == desc->num_sgs) + chan->next_desc = NULL; + else + chan->next_desc = desc; + + sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID); + + if (axi_dmac_dest_is_mem(chan)) { + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr); + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride); + } + + if (axi_dmac_src_is_mem(chan)) { + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr); + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride); + } + + /* + * If the hardware supports cyclic transfers and there is no callback to + * call, enable hw cyclic mode to avoid unnecessary interrupts. + */ + if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) + flags |= AXI_DMAC_FLAG_CYCLIC; + + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); + axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); + axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); + axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1); +} + +static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan) +{ + return list_first_entry_or_null(&chan->active_descs, + struct axi_dmac_desc, vdesc.node); +} + +static void axi_dmac_transfer_done(struct axi_dmac_chan *chan, + unsigned int completed_transfers) +{ + struct axi_dmac_desc *active; + struct axi_dmac_sg *sg; + + active = axi_dmac_active_desc(chan); + if (!active) + return; + + if (active->cyclic) { + vchan_cyclic_callback(&active->vdesc); + } else { + do { + sg = &active->sg[active->num_completed]; + if (!(BIT(sg->id) & completed_transfers)) + break; + active->num_completed++; + if (active->num_completed == active->num_sgs) { + list_del(&active->vdesc.node); + vchan_cookie_complete(&active->vdesc); + active = axi_dmac_active_desc(chan); + } + } while (active); + } +} + +static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) +{ + struct axi_dmac *dmac = devid; + unsigned int pending; + + pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending); + + spin_lock(&dmac->chan.vchan.lock); + /* One or more transfers have finished */ + if (pending & AXI_DMAC_IRQ_EOT) { + unsigned int completed; + + completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); + axi_dmac_transfer_done(&dmac->chan, completed); + } + /* Space has become available in the descriptor queue */ + if (pending & AXI_DMAC_IRQ_SOT) + axi_dmac_start_transfer(&dmac->chan); + spin_unlock(&dmac->chan.vchan.lock); + + return IRQ_HANDLED; +} + +static int axi_dmac_terminate_all(struct dma_chan *c) +{ + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); + struct axi_dmac *dmac = chan_to_axi_dmac(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vchan.lock, flags); + axi_dmac_write(dmac, 
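/*
 * (Editorial note, not part of the original patch: the core queues
 * transfer descriptors in hardware. axi_dmac_start_transfer() above
 * reads AXI_DMAC_REG_START_TRANSFER and backs off while it is
 * non-zero, i.e. while the hardware queue is full; the SOT interrupt
 * signals that a slot has freed up and resumes submission, while EOT
 * reports the IDs of completed transfers through
 * AXI_DMAC_REG_TRANSFER_DONE.)
 */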
AXI_DMAC_REG_CTRL, 0); + chan->next_desc = NULL; + vchan_get_all_descriptors(&chan->vchan, &head); + list_splice_tail_init(&chan->active_descs, &head); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + vchan_dma_desc_free_list(&chan->vchan, &head); + + return 0; +} + +static void axi_dmac_issue_pending(struct dma_chan *c) +{ + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); + struct axi_dmac *dmac = chan_to_axi_dmac(chan); + unsigned long flags; + + axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE); + + spin_lock_irqsave(&chan->vchan.lock, flags); + if (vchan_issue_pending(&chan->vchan)) + axi_dmac_start_transfer(chan); + spin_unlock_irqrestore(&chan->vchan.lock, flags); +} + +static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) +{ + struct axi_dmac_desc *desc; + + desc = kzalloc(sizeof(struct axi_dmac_desc) + + sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); + if (!desc) + return NULL; + + desc->num_sgs = num_sgs; + + return desc; +} + +static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( + struct dma_chan *c, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); + struct axi_dmac_desc *desc; + struct scatterlist *sg; + unsigned int i; + + if (direction != chan->direction) + return NULL; + + desc = axi_dmac_alloc_desc(sg_len); + if (!desc) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) || + !axi_dmac_check_len(chan, sg_dma_len(sg))) { + kfree(desc); + return NULL; + } + + if (direction == DMA_DEV_TO_MEM) + desc->sg[i].dest_addr = sg_dma_address(sg); + else + desc->sg[i].src_addr = sg_dma_address(sg); + desc->sg[i].x_len = sg_dma_len(sg); + desc->sg[i].y_len = 1; + } + + desc->cyclic = false; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic( + struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); + struct axi_dmac_desc *desc; + unsigned int num_periods, i; + + if (direction != chan->direction) + return NULL; + + if (!axi_dmac_check_len(chan, buf_len) || + !axi_dmac_check_addr(chan, buf_addr)) + return NULL; + + if (period_len == 0 || buf_len % period_len) + return NULL; + + num_periods = buf_len / period_len; + + desc = axi_dmac_alloc_desc(num_periods); + if (!desc) + return NULL; + + for (i = 0; i < num_periods; i++) { + if (direction == DMA_DEV_TO_MEM) + desc->sg[i].dest_addr = buf_addr; + else + desc->sg[i].src_addr = buf_addr; + desc->sg[i].x_len = period_len; + desc->sg[i].y_len = 1; + buf_addr += period_len; + } + + desc->cyclic = true; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved( + struct dma_chan *c, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); + struct axi_dmac_desc *desc; + size_t dst_icg, src_icg; + + if (xt->frame_size != 1) + return NULL; + + if (xt->dir != chan->direction) + return NULL; + + if (axi_dmac_src_is_mem(chan)) { + if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start)) + return NULL; + } + + if (axi_dmac_dest_is_mem(chan)) { + if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start)) + return NULL; + } + + dst_icg = dmaengine_get_dst_icg(xt, 
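/*
 * (Editorial note, not part of the original patch:
 * dmaengine_get_dst_icg()/dmaengine_get_src_icg() return the template's
 * inter-chunk gap, the number of bytes to skip after each chunk. In the
 * checks that follow, a 2D-capable channel programs the frame as
 * y_len = numf rows of x_len = sgl[0].size bytes with a row stride of
 * size + icg; without 2D support only gapless templates are accepted
 * and the rows are flattened into a single x_len.)
 */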
&xt->sgl[0]); + src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); + + if (chan->hw_2d) { + if (!axi_dmac_check_len(chan, xt->sgl[0].size) || + !axi_dmac_check_len(chan, xt->numf)) + return NULL; + if (xt->sgl[0].size + dst_icg > chan->max_length || + xt->sgl[0].size + src_icg > chan->max_length) + return NULL; + } else { + if (dst_icg != 0 || src_icg != 0) + return NULL; + if (chan->max_length / xt->sgl[0].size < xt->numf) + return NULL; + if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf)) + return NULL; + } + + desc = axi_dmac_alloc_desc(1); + if (!desc) + return NULL; + + if (axi_dmac_src_is_mem(chan)) { + desc->sg[0].src_addr = xt->src_start; + desc->sg[0].src_stride = xt->sgl[0].size + src_icg; + } + + if (axi_dmac_dest_is_mem(chan)) { + desc->sg[0].dest_addr = xt->dst_start; + desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg; + } + + if (chan->hw_2d) { + desc->sg[0].x_len = xt->sgl[0].size; + desc->sg[0].y_len = xt->numf; + } else { + desc->sg[0].x_len = xt->sgl[0].size * xt->numf; + desc->sg[0].y_len = 1; + } + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static void axi_dmac_free_chan_resources(struct dma_chan *c) +{ + vchan_free_chan_resources(to_virt_chan(c)); +} + +static void axi_dmac_desc_free(struct virt_dma_desc *vdesc) +{ + kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); +} + +/* + * The configuration stored in the devicetree matches the configuration + * parameters of the peripheral instance and allows the driver to know which + * features are implemented and how it should behave. + */ +static int axi_dmac_parse_chan_dt(struct device_node *of_chan, + struct axi_dmac_chan *chan) +{ + u32 val; + int ret; + + ret = of_property_read_u32(of_chan, "reg", &val); + if (ret) + return ret; + + /* We only support 1 channel for now */ + if (val != 0) + return -EINVAL; + + ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val); + if (ret) + return ret; + if (val > AXI_DMAC_BUS_TYPE_FIFO) + return -EINVAL; + chan->src_type = val; + + ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val); + if (ret) + return ret; + if (val > AXI_DMAC_BUS_TYPE_FIFO) + return -EINVAL; + chan->dest_type = val; + + ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val); + if (ret) + return ret; + chan->src_width = val / 8; + + ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val); + if (ret) + return ret; + chan->dest_width = val / 8; + + ret = of_property_read_u32(of_chan, "adi,length-width", &val); + if (ret) + return ret; + + if (val >= 32) + chan->max_length = UINT_MAX; + else + chan->max_length = (1ULL << val) - 1; + + chan->align_mask = max(chan->dest_width, chan->src_width) - 1; + + if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) + chan->direction = DMA_MEM_TO_MEM; + else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) + chan->direction = DMA_MEM_TO_DEV; + else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan)) + chan->direction = DMA_DEV_TO_MEM; + else + chan->direction = DMA_DEV_TO_DEV; + + chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic"); + chan->hw_2d = of_property_read_bool(of_chan, "adi,2d"); + + return 0; +} + +static int axi_dmac_probe(struct platform_device *pdev) +{ + struct device_node *of_channels, *of_chan; + struct dma_device *dma_dev; + struct axi_dmac *dmac; + struct resource *res; + int ret; + + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); + if (!dmac) + return -ENOMEM; + + dmac->irq = platform_get_irq(pdev, 0); + if (dmac->irq 
<= 0) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmac->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dmac->base)) + return PTR_ERR(dmac->base); + + dmac->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dmac->clk)) + return PTR_ERR(dmac->clk); + + INIT_LIST_HEAD(&dmac->chan.active_descs); + + of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels"); + if (of_channels == NULL) + return -ENODEV; + + for_each_child_of_node(of_channels, of_chan) { + ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan); + if (ret) { + of_node_put(of_chan); + of_node_put(of_channels); + return -EINVAL; + } + } + of_node_put(of_channels); + + pdev->dev.dma_parms = &dmac->dma_parms; + dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length); + + dma_dev = &dmac->dma_dev; + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); + dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources; + dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_issue_pending = axi_dmac_issue_pending; + dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg; + dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic; + dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved; + dma_dev->device_terminate_all = axi_dmac_terminate_all; + dma_dev->dev = &pdev->dev; + dma_dev->chancnt = 1; + dma_dev->src_addr_widths = BIT(dmac->chan.src_width); + dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width); + dma_dev->directions = BIT(dmac->chan.direction); + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + INIT_LIST_HEAD(&dma_dev->channels); + + dmac->chan.vchan.desc_free = axi_dmac_desc_free; + vchan_init(&dmac->chan.vchan, dma_dev); + + ret = clk_prepare_enable(dmac->clk); + if (ret < 0) + return ret; + + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto err_clk_disable; + + ret = of_dma_controller_register(pdev->dev.of_node, + of_dma_xlate_by_chan_id, dma_dev); + if (ret) + goto err_unregister_device; + + ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0, + dev_name(&pdev->dev), dmac); + if (ret) + goto err_unregister_of; + + platform_set_drvdata(pdev, dmac); + + return 0; + +err_unregister_of: + of_dma_controller_free(pdev->dev.of_node); +err_unregister_device: + dma_async_device_unregister(&dmac->dma_dev); +err_clk_disable: + clk_disable_unprepare(dmac->clk); + + return ret; +} + +static int axi_dmac_remove(struct platform_device *pdev) +{ + struct axi_dmac *dmac = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + free_irq(dmac->irq, dmac); + tasklet_kill(&dmac->chan.vchan.task); + dma_async_device_unregister(&dmac->dma_dev); + clk_disable_unprepare(dmac->clk); + + return 0; +} + +static const struct of_device_id axi_dmac_of_match_table[] = { + { .compatible = "adi,axi-dmac-1.00.a" }, + { }, +}; + +static struct platform_driver axi_dmac_driver = { + .driver = { + .name = "dma-axi-dmac", + .of_match_table = axi_dmac_of_match_table, + }, + .probe = axi_dmac_probe, + .remove = axi_dmac_remove, +}; +module_platform_driver(axi_dmac_driver); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 26d2f0e09..dade7c47f 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -145,7 +145,8 @@ struct jz4780_dma_dev { struct jz4780_dma_chan 
chan[JZ_DMA_NR_CHANNELS]; }; -struct jz4780_dma_data { +struct jz4780_dma_filter_data { + struct device_node *of_node; uint32_t transfer_type; int channel; }; @@ -214,11 +215,25 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc) kfree(desc); } -static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord) +static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift) { - *ord = ffs(val) - 1; + int ord = ffs(val) - 1; - switch (*ord) { + /* + * 8 byte transfer sizes unsupported so fall back on 4. If it's larger + * than the maximum, just limit it. It is perfectly safe to fall back + * in this way since we won't exceed the maximum burst size supported + * by the device, the only effect is reduced efficiency. This is better + * than refusing to perform the request at all. + */ + if (ord == 3) + ord = 2; + else if (ord > 7) + ord = 7; + + *shift = ord; + + switch (ord) { case 0: return JZ_DMA_SIZE_1_BYTE; case 1: @@ -231,20 +246,17 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord) return JZ_DMA_SIZE_32_BYTE; case 6: return JZ_DMA_SIZE_64_BYTE; - case 7: - return JZ_DMA_SIZE_128_BYTE; default: - return -EINVAL; + return JZ_DMA_SIZE_128_BYTE; } } -static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, +static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, enum dma_transfer_direction direction) { struct dma_slave_config *config = &jzchan->config; uint32_t width, maxburst, tsz; - int ord; if (direction == DMA_MEM_TO_DEV) { desc->dcm = JZ_DMA_DCM_SAI; @@ -271,8 +283,8 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, * divisible by the transfer size, and we must not use more than the * maximum burst specified by the user. 
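	 *
	 * (Illustrative example, not part of the original patch: for
	 * addr 0x1010, len 0x40 and width * maxburst 0x10, the OR of
	 * the three values is 0x1050, whose lowest set bit is bit 4,
	 * so jz4780_dma_transfer_size() picks a 16-byte transfer size
	 * and desc->dtc becomes len >> 4.)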
*/ - tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord); - jzchan->transfer_shift = ord; + tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), + &jzchan->transfer_shift); switch (width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: @@ -289,12 +301,14 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT; desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT; - desc->dtc = len >> ord; + desc->dtc = len >> jzchan->transfer_shift; + return 0; } static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_transfer_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags, + void *context) { struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); struct jz4780_dma_desc *desc; @@ -307,12 +321,11 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg( for (i = 0; i < sg_len; i++) { err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], - sg_dma_address(&sgl[i]), - sg_dma_len(&sgl[i]), - direction); + sg_dma_address(&sgl[i]), + sg_dma_len(&sgl[i]), + direction); if (err < 0) - return ERR_PTR(err); - + return NULL; desc->desc[i].dcm |= JZ_DMA_DCM_TIE; @@ -354,9 +367,9 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic( for (i = 0; i < periods; i++) { err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr, - period_len, direction); + period_len, direction); if (err < 0) - return ERR_PTR(err); + return NULL; buf_addr += period_len; @@ -390,15 +403,13 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); struct jz4780_dma_desc *desc; uint32_t tsz; - int ord; desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY); if (!desc) return NULL; - tsz = jz4780_dma_transfer_size(dest | src | len, &ord); - if (tsz < 0) - return ERR_PTR(tsz); + tsz = jz4780_dma_transfer_size(dest | src | len, + &jzchan->transfer_shift); desc->desc[0].dsa = src; desc->desc[0].dta = dest; @@ -407,7 +418,7 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( tsz << JZ_DMA_DCM_TSZ_SHIFT | JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT | JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT; - desc->desc[0].dtc = len >> ord; + desc->desc[0].dtc = len >> jzchan->transfer_shift; return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); } @@ -484,8 +495,9 @@ static void jz4780_dma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&jzchan->vchan.lock, flags); } -static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan) +static int jz4780_dma_terminate_all(struct dma_chan *chan) { + struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); unsigned long flags; LIST_HEAD(head); @@ -507,9 +519,11 @@ static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan) return 0; } -static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan, - const struct dma_slave_config *config) +static int jz4780_dma_config(struct dma_chan *chan, + struct dma_slave_config *config) { + struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); + if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)) return -EINVAL; @@ -567,8 +581,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, txstate->residue = 0; if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc - && jzchan->desc->status & 
(JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) - status = DMA_ERROR; + && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) + status = DMA_ERROR; spin_unlock_irqrestore(&jzchan->vchan.lock, flags); return status; @@ -671,7 +685,10 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) { struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); - struct jz4780_dma_data *data = param; + struct jz4780_dma_filter_data *data = param; + + if (jzdma->dma_device.dev->of_node != data->of_node) + return false; if (data->channel > -1) { if (data->channel != jzchan->id) @@ -690,11 +707,12 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, { struct jz4780_dma_dev *jzdma = ofdma->of_dma_data; dma_cap_mask_t mask = jzdma->dma_device.cap_mask; - struct jz4780_dma_data data; + struct jz4780_dma_filter_data data; if (dma_spec->args_count != 2) return NULL; + data.of_node = ofdma->of_node; data.transfer_type = dma_spec->args[0]; data.channel = dma_spec->args[1]; @@ -713,9 +731,14 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, data.channel); return NULL; } - } - return dma_request_channel(mask, jz4780_dma_filter_fn, &data); + jzdma->chan[data.channel].transfer_type = data.transfer_type; + + return dma_get_slave_channel( + &jzdma->chan[data.channel].vchan.chan); + } else { + return dma_request_channel(mask, jz4780_dma_filter_fn, &data); + } } static int jz4780_dma_probe(struct platform_device *pdev) @@ -743,23 +766,26 @@ static int jz4780_dma_probe(struct platform_device *pdev) if (IS_ERR(jzdma->base)) return PTR_ERR(jzdma->base); - jzdma->irq = platform_get_irq(pdev, 0); - if (jzdma->irq < 0) { + ret = platform_get_irq(pdev, 0); + if (ret < 0) { dev_err(dev, "failed to get IRQ: %d\n", ret); - return jzdma->irq; + return ret; } - ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0, - dev_name(dev), jzdma); + jzdma->irq = ret; + + ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), + jzdma); if (ret) { dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); - return -EINVAL; + return ret; } jzdma->clk = devm_clk_get(dev, NULL); if (IS_ERR(jzdma->clk)) { dev_err(dev, "failed to get clock\n"); - return PTR_ERR(jzdma->clk); + ret = PTR_ERR(jzdma->clk); + goto err_free_irq; } clk_prepare_enable(jzdma->clk); @@ -775,13 +801,13 @@ static int jz4780_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_CYCLIC, dd->cap_mask); dd->dev = dev; - dd->copy_align = 2; /* 2^2 = 4 byte alignment */ + dd->copy_align = DMAENGINE_ALIGN_4_BYTES; dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources; dd->device_free_chan_resources = jz4780_dma_free_chan_resources; dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg; dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic; dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy; - dd->device_config = jz4780_dma_slave_config; + dd->device_config = jz4780_dma_config; dd->device_terminate_all = jz4780_dma_terminate_all; dd->device_tx_status = jz4780_dma_tx_status; dd->device_issue_pending = jz4780_dma_issue_pending; @@ -790,7 +816,6 @@ static int jz4780_dma_probe(struct platform_device *pdev) dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - /* * Enable DMA controller, mark all channels as not programmable. 
* Also set the FMSC bit - it increases MSC performance, so it makes @@ -832,15 +857,24 @@ err_unregister_dev: err_disable_clk: clk_disable_unprepare(jzdma->clk); + +err_free_irq: + free_irq(jzdma->irq, jzdma); return ret; } static int jz4780_dma_remove(struct platform_device *pdev) { struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev); + int i; of_dma_controller_free(pdev->dev.of_node); - devm_free_irq(&pdev->dev, jzdma->irq, jzdma); + + free_irq(jzdma->irq, jzdma); + + for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) + tasklet_kill(&jzdma->chan[i].vchan.task); + dma_async_device_unregister(&jzdma->dma_device); return 0; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e..09479d4be 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index 36e02f0f6..e00c9b022 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -6,6 +6,9 @@ config DW_DMAC_CORE tristate select DMA_ENGINE +config DW_DMAC_BIG_ENDIAN_IO + bool + config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" select DW_DMAC_CORE @@ -23,6 +26,3 @@ config DW_DMAC_PCI Support the Synopsys DesignWare AHB DMA controller on the platfroms that enumerate it as a PCI device. For example, Intel Medfield has integrated this GPDMA controller. - -config DW_DMAC_BIG_ENDIAN_IO - bool diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 88853af69..3e5d4f193 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1000,7 +1000,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, * code using dma memcpy must make sure alignment of * length is at dma->copy_align boundary. 
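 *
 * (Editorial note, not part of the original patch: the raw value 2
 * previously assigned here and DMAENGINE_ALIGN_4_BYTES encode the same
 * 2^2-byte alignment; the change only switches to the named
 * dmaengine_alignment constant.)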
*/ - dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma->copy_align = DMAENGINE_ALIGN_4_BYTES; INIT_LIST_HEAD(&dma->channels); } diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index f42f71e37..7669c7dd1 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -99,21 +99,13 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc) { - unsigned long flags; - - spin_lock_irqsave(&hsuc->lock, flags); hsu_chan_disable(hsuc); hsu_chan_writel(hsuc, HSU_CH_DCR, 0); - spin_unlock_irqrestore(&hsuc->lock, flags); } static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc) { - unsigned long flags; - - spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_chan_start(hsuc); - spin_unlock_irqrestore(&hsuc->lock, flags); } static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc) @@ -139,9 +131,9 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc) unsigned long flags; u32 sr; - spin_lock_irqsave(&hsuc->lock, flags); + spin_lock_irqsave(&hsuc->vchan.lock, flags); sr = hsu_chan_readl(hsuc, HSU_CH_SR); - spin_unlock_irqrestore(&hsuc->lock, flags); + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); return sr; } @@ -273,14 +265,11 @@ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) struct hsu_dma_desc *desc = hsuc->desc; size_t bytes = hsu_dma_desc_size(desc); int i; - unsigned long flags; - spin_lock_irqsave(&hsuc->lock, flags); i = desc->active % HSU_DMA_CHAN_NR_DESC; do { bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); } while (--i >= 0); - spin_unlock_irqrestore(&hsuc->lock, flags); return bytes; } @@ -327,24 +316,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan, return 0; } -static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc) -{ - unsigned long flags; - - spin_lock_irqsave(&hsuc->lock, flags); - hsu_chan_disable(hsuc); - spin_unlock_irqrestore(&hsuc->lock, flags); -} - -static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc) -{ - unsigned long flags; - - spin_lock_irqsave(&hsuc->lock, flags); - hsu_chan_enable(hsuc); - spin_unlock_irqrestore(&hsuc->lock, flags); -} - static int hsu_dma_pause(struct dma_chan *chan) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); @@ -352,7 +323,7 @@ static int hsu_dma_pause(struct dma_chan *chan) spin_lock_irqsave(&hsuc->vchan.lock, flags); if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { - hsu_dma_chan_deactivate(hsuc); + hsu_chan_disable(hsuc); hsuc->desc->status = DMA_PAUSED; } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); @@ -368,7 +339,7 @@ static int hsu_dma_resume(struct dma_chan *chan) spin_lock_irqsave(&hsuc->vchan.lock, flags); if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { hsuc->desc->status = DMA_IN_PROGRESS; - hsu_dma_chan_activate(hsuc); + hsu_chan_enable(hsuc); } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); @@ -441,8 +412,6 @@ int hsu_dma_probe(struct hsu_dma_chip *chip) hsuc->direction = (i & 0x1) ? 
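/*
 * (Editorial note, not part of the original patch: odd-numbered HSU
 * channels are device-to-memory (RX) and even-numbered ones
 * memory-to-device (TX). The per-channel spinlock removed by this
 * patch was redundant: every remaining caller of the channel helpers
 * already holds hsuc->vchan.lock, which now serializes register
 * access.)
 */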
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH; - - spin_lock_init(&hsuc->lock); } dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask); diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h index 0275233cf..eeb9fff66 100644 --- a/drivers/dma/hsu/hsu.h +++ b/drivers/dma/hsu/hsu.h @@ -78,7 +78,6 @@ struct hsu_dma_chan { struct virt_dma_chan vchan; void __iomem *reg; - spinlock_t lock; /* hardware configuration */ enum dma_transfer_direction direction; diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c new file mode 100644 index 000000000..48d6d9e94 --- /dev/null +++ b/drivers/dma/idma64.c @@ -0,0 +1,710 @@ +/* + * Core driver for the Intel integrated DMA 64-bit + * + * Copyright (C) 2015 Intel Corporation + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "idma64.h" + +/* Platform driver name */ +#define DRV_NAME "idma64" + +/* For now we support only two channels */ +#define IDMA64_NR_CHAN 2 + +/* ---------------------------------------------------------------------- */ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +/* ---------------------------------------------------------------------- */ + +static void idma64_off(struct idma64 *idma64) +{ + unsigned short count = 100; + + dma_writel(idma64, CFG, 0); + + channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask); + channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask); + channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask); + channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask); + channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask); + + do { + cpu_relax(); + } while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count); +} + +static void idma64_on(struct idma64 *idma64) +{ + dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN); +} + +/* ---------------------------------------------------------------------- */ + +static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c) +{ + u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0); + u32 cfglo = 0; + + /* Enforce FIFO drain when channel is suspended */ + cfglo |= IDMA64C_CFGL_CH_DRAIN; + + /* Set default burst alignment */ + cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN; + + channel_writel(idma64c, CFG_LO, cfglo); + channel_writel(idma64c, CFG_HI, cfghi); + + /* Enable interrupts */ + channel_set_bit(idma64, MASK(XFER), idma64c->mask); + channel_set_bit(idma64, MASK(ERROR), idma64c->mask); + + /* + * Enforce the controller to be turned on. + * + * The iDMA is turned off in ->probe() and looses context during system + * suspend / resume cycle. That's why we have to enable it each time we + * use it. 
+ */ + idma64_on(idma64); +} + +static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c) +{ + channel_clear_bit(idma64, CH_EN, idma64c->mask); +} + +static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c) +{ + struct idma64_desc *desc = idma64c->desc; + struct idma64_hw_desc *hw = &desc->hw[0]; + + channel_writeq(idma64c, SAR, 0); + channel_writeq(idma64c, DAR, 0); + + channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL)); + channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN); + + channel_writeq(idma64c, LLP, hw->llp); + + channel_set_bit(idma64, CH_EN, idma64c->mask); +} + +static void idma64_stop_transfer(struct idma64_chan *idma64c) +{ + struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device); + + idma64_chan_stop(idma64, idma64c); +} + +static void idma64_start_transfer(struct idma64_chan *idma64c) +{ + struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device); + struct virt_dma_desc *vdesc; + + /* Get the next descriptor */ + vdesc = vchan_next_desc(&idma64c->vchan); + if (!vdesc) { + idma64c->desc = NULL; + return; + } + + list_del(&vdesc->node); + idma64c->desc = to_idma64_desc(vdesc); + + /* Configure the channel */ + idma64_chan_init(idma64, idma64c); + + /* Start the channel with a new descriptor */ + idma64_chan_start(idma64, idma64c); +} + +/* ---------------------------------------------------------------------- */ + +static void idma64_chan_irq(struct idma64 *idma64, unsigned short c, + u32 status_err, u32 status_xfer) +{ + struct idma64_chan *idma64c = &idma64->chan[c]; + struct idma64_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&idma64c->vchan.lock, flags); + desc = idma64c->desc; + if (desc) { + if (status_err & (1 << c)) { + dma_writel(idma64, CLEAR(ERROR), idma64c->mask); + desc->status = DMA_ERROR; + } else if (status_xfer & (1 << c)) { + dma_writel(idma64, CLEAR(XFER), idma64c->mask); + desc->status = DMA_COMPLETE; + vchan_cookie_complete(&desc->vdesc); + idma64_start_transfer(idma64c); + } + + /* idma64_start_transfer() updates idma64c->desc */ + if (idma64c->desc == NULL || desc->status == DMA_ERROR) + idma64_stop_transfer(idma64c); + } + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); +} + +static irqreturn_t idma64_irq(int irq, void *dev) +{ + struct idma64 *idma64 = dev; + u32 status = dma_readl(idma64, STATUS_INT); + u32 status_xfer; + u32 status_err; + unsigned short i; + + dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status); + + /* Check if we have any interrupt from the DMA controller */ + if (!status) + return IRQ_NONE; + + /* Disable interrupts */ + channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask); + channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask); + + status_xfer = dma_readl(idma64, RAW(XFER)); + status_err = dma_readl(idma64, RAW(ERROR)); + + for (i = 0; i < idma64->dma.chancnt; i++) + idma64_chan_irq(idma64, i, status_err, status_xfer); + + /* Re-enable interrupts */ + channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask); + channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask); + + return IRQ_HANDLED; +} + +/* ---------------------------------------------------------------------- */ + +static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc) +{ + struct idma64_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT); + if (!desc->hw) { + kfree(desc); + return NULL; + } + + return desc; +} + +static 
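/*
 * (Editorial note, not part of the original patch: each idma64
 * software descriptor owns an array of hardware descriptors whose
 * LLIs come from the channel's dma_pool and are chained through their
 * llp fields; the helper below releases them in reverse order before
 * freeing the array and the descriptor itself.)
 */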
void idma64_desc_free(struct idma64_chan *idma64c, + struct idma64_desc *desc) +{ + struct idma64_hw_desc *hw; + + if (desc->ndesc) { + unsigned int i = desc->ndesc; + + do { + hw = &desc->hw[--i]; + dma_pool_free(idma64c->pool, hw->lli, hw->llp); + } while (i); + } + + kfree(desc->hw); + kfree(desc); +} + +static void idma64_vdesc_free(struct virt_dma_desc *vdesc) +{ + struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan); + + idma64_desc_free(idma64c, to_idma64_desc(vdesc)); +} + +static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, + struct dma_slave_config *config, + enum dma_transfer_direction direction, u64 llp) +{ + struct idma64_lli *lli = hw->lli; + u64 sar, dar; + u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len); + u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN; + u32 src_width, dst_width; + + if (direction == DMA_MEM_TO_DEV) { + sar = hw->phys; + dar = config->dst_addr; + ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC | + IDMA64C_CTLL_FC_M2P; + src_width = min_t(u32, 2, __fls(sar | hw->len)); + dst_width = __fls(config->dst_addr_width); + } else { /* DMA_DEV_TO_MEM */ + sar = config->src_addr; + dar = hw->phys; + ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX | + IDMA64C_CTLL_FC_P2M; + src_width = __fls(config->src_addr_width); + dst_width = min_t(u32, 2, __fls(dar | hw->len)); + } + + lli->sar = sar; + lli->dar = dar; + + lli->ctlhi = ctlhi; + lli->ctllo = ctllo | + IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) | + IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) | + IDMA64C_CTLL_DST_WIDTH(dst_width) | + IDMA64C_CTLL_SRC_WIDTH(src_width); + + lli->llp = llp; + return hw->llp; +} + +static void idma64_desc_fill(struct idma64_chan *idma64c, + struct idma64_desc *desc) +{ + struct dma_slave_config *config = &idma64c->config; + struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1]; + struct idma64_lli *lli = hw->lli; + u64 llp = 0; + unsigned int i = desc->ndesc; + + /* Fill the hardware descriptors and link them to a list */ + do { + hw = &desc->hw[--i]; + llp = idma64_hw_desc_fill(hw, config, desc->direction, llp); + desc->length += hw->len; + } while (i); + + /* Trigger interrupt after last block */ + lli->ctllo |= IDMA64C_CTLL_INT_EN; +} + +static struct dma_async_tx_descriptor *idma64_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct idma64_chan *idma64c = to_idma64_chan(chan); + struct idma64_desc *desc; + struct scatterlist *sg; + unsigned int i; + + desc = idma64_alloc_desc(sg_len); + if (!desc) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + struct idma64_hw_desc *hw = &desc->hw[i]; + + /* Allocate DMA capable memory for hardware descriptor */ + hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp); + if (!hw->lli) { + desc->ndesc = i; + idma64_desc_free(idma64c, desc); + return NULL; + } + + hw->phys = sg_dma_address(sg); + hw->len = sg_dma_len(sg); + } + + desc->ndesc = sg_len; + desc->direction = direction; + desc->status = DMA_IN_PROGRESS; + + idma64_desc_fill(idma64c, desc); + return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags); +} + +static void idma64_issue_pending(struct dma_chan *chan) +{ + struct idma64_chan *idma64c = to_idma64_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&idma64c->vchan.lock, flags); + if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc) + idma64_start_transfer(idma64c); + spin_unlock_irqrestore(&idma64c->vchan.lock, flags); +} + +static size_t 
idma64_active_desc_size(struct idma64_chan *idma64c)
+{
+	struct idma64_desc *desc = idma64c->desc;
+	struct idma64_hw_desc *hw;
+	size_t bytes = desc->length;
+	u64 llp = channel_readq(idma64c, LLP);
+	u32 ctlhi = channel_readl(idma64c, CTL_HI);
+	unsigned int i = 0;
+
+	do {
+		hw = &desc->hw[i];
+		if (hw->llp == llp)
+			break;
+		bytes -= hw->len;
+	} while (++i < desc->ndesc);
+
+	if (!i)
+		return bytes;
+
+	/* The current chunk is not fully transferred yet */
+	bytes += desc->hw[--i].len;
+
+	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
+}
+
+static enum dma_status idma64_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	size_t bytes;
+	unsigned long flags;
+
+	status = dma_cookie_status(chan, cookie, state);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
+	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
+		bytes = idma64_active_desc_size(idma64c);
+		dma_set_residue(state, bytes);
+		status = idma64c->desc->status;
+	} else if (vdesc) {
+		bytes = to_idma64_desc(vdesc)->length;
+		dma_set_residue(state, bytes);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return status;
+}
+
+static void convert_burst(u32 *maxburst)
+{
+	if (*maxburst)
+		*maxburst = __fls(*maxburst);
+	else
+		*maxburst = 0;
+}
+
+static int idma64_slave_config(struct dma_chan *chan,
+		struct dma_slave_config *config)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memcpy(&idma64c->config, config, sizeof(idma64c->config));
+
+	convert_burst(&idma64c->config.src_maxburst);
+	convert_burst(&idma64c->config.dst_maxburst);
+
+	return 0;
+}
+
+static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+{
+	unsigned short count = 100;
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
+	do {
+		udelay(1);
+		cfglo = channel_readl(idma64c, CFG_LO);
+	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
+}
+
+static void idma64_chan_activate(struct idma64_chan *idma64c)
+{
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
+}
+
+static int idma64_pause(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
+		idma64_chan_deactivate(idma64c);
+		idma64c->desc->status = DMA_PAUSED;
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
+
+static int idma64_resume(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
+		idma64c->desc->status = DMA_IN_PROGRESS;
+		idma64_chan_activate(idma64c);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
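For context, this is how a dmaengine client would exercise the pause/resume and residue paths implemented above — a minimal sketch, not part of the patch; the channel name, FIFO address, and burst size are hypothetical:

#include <linux/dmaengine.h>

/* Hypothetical client of the slave API implemented above (sketch only). */
static int example_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = 0xdeadb000,		/* hypothetical FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,	/* idma64_slave_config() converts this to log2 */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_tx_state state;
	dma_cookie_t cookie;

	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);		/* -> idma64_issue_pending() */

	dmaengine_pause(chan);			/* -> idma64_pause() */
	dmaengine_tx_status(chan, cookie, &state);
	/* state.residue was computed by idma64_active_desc_size() */
	dmaengine_resume(chan);			/* -> idma64_resume() */

	dma_release_channel(chan);
	return 0;
}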
+
+static int idma64_terminate_all(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	idma64_chan_deactivate(idma64c);
+	idma64_stop_transfer(idma64c);
+	if (idma64c->desc) {
+		idma64_vdesc_free(&idma64c->desc->vdesc);
+		idma64c->desc = NULL;
+	}
+	vchan_get_all_descriptors(&idma64c->vchan, &head);
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&idma64c->vchan, &head);
+	return 0;
+}
+
+static int idma64_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Create a pool of consistent memory blocks for hardware descriptors */
+	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
+					chan->device->dev,
+					sizeof(struct idma64_lli), 8, 0);
+	if (!idma64c->pool) {
+		dev_err(chan2dev(chan), "No memory for descriptors\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void idma64_free_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	vchan_free_chan_resources(to_virt_chan(chan));
+	dma_pool_destroy(idma64c->pool);
+	idma64c->pool = NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define IDMA64_BUSWIDTHS				\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+static int idma64_probe(struct idma64_chip *chip)
+{
+	struct idma64 *idma64;
+	unsigned short nr_chan = IDMA64_NR_CHAN;
+	unsigned short i;
+	int ret;
+
+	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
+	if (!idma64)
+		return -ENOMEM;
+
+	idma64->regs = chip->regs;
+	chip->idma64 = idma64;
+
+	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
+				    GFP_KERNEL);
+	if (!idma64->chan)
+		return -ENOMEM;
+
+	idma64->all_chan_mask = (1 << nr_chan) - 1;
+
+	/* Turn off iDMA controller */
+	idma64_off(idma64);
+
+	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
+			       dev_name(chip->dev), idma64);
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&idma64->dma.channels);
+	for (i = 0; i < nr_chan; i++) {
+		struct idma64_chan *idma64c = &idma64->chan[i];
+
+		idma64c->vchan.desc_free = idma64_vdesc_free;
+		vchan_init(&idma64c->vchan, &idma64->dma);
+
+		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
+		idma64c->mask = BIT(i);
+	}
+
+	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
+
+	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
+	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
+
+	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
+
+	idma64->dma.device_issue_pending = idma64_issue_pending;
+	idma64->dma.device_tx_status = idma64_tx_status;
+
+	idma64->dma.device_config = idma64_slave_config;
+	idma64->dma.device_pause = idma64_pause;
+	idma64->dma.device_resume = idma64_resume;
+	idma64->dma.device_terminate_all = idma64_terminate_all;
+
+	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	idma64->dma.dev = chip->dev;
+
+	ret = dma_async_device_register(&idma64->dma);
+	if (ret)
+		return ret;
+
+	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
+	return 0;
+}
+
+static int idma64_remove(struct idma64_chip *chip)
+{
+	struct idma64 *idma64 = chip->idma64;
+	unsigned short i;
+
+	dma_async_device_unregister(&idma64->dma);
+
+	/*
+	 * Explicitly call devm_free_irq() to avoid the side effects with
+	 * the scheduled tasklets.
+ */ + devm_free_irq(chip->dev, chip->irq, idma64); + + for (i = 0; i < idma64->dma.chancnt; i++) { + struct idma64_chan *idma64c = &idma64->chan[i]; + + tasklet_kill(&idma64c->vchan.task); + } + + return 0; +} + +/* ---------------------------------------------------------------------- */ + +static int idma64_platform_probe(struct platform_device *pdev) +{ + struct idma64_chip *chip; + struct device *dev = &pdev->dev; + struct resource *mem; + int ret; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->irq = platform_get_irq(pdev, 0); + if (chip->irq < 0) + return chip->irq; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + chip->dev = dev; + + ret = idma64_probe(chip); + if (ret) + return ret; + + platform_set_drvdata(pdev, chip); + return 0; +} + +static int idma64_platform_remove(struct platform_device *pdev) +{ + struct idma64_chip *chip = platform_get_drvdata(pdev); + + return idma64_remove(chip); +} + +#ifdef CONFIG_PM_SLEEP + +static int idma64_pm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct idma64_chip *chip = platform_get_drvdata(pdev); + + idma64_off(chip->idma64); + return 0; +} + +static int idma64_pm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct idma64_chip *chip = platform_get_drvdata(pdev); + + idma64_on(chip->idma64); + return 0; +} + +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops idma64_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume) +}; + +static struct platform_driver idma64_platform_driver = { + .probe = idma64_platform_probe, + .remove = idma64_platform_remove, + .driver = { + .name = DRV_NAME, + .pm = &idma64_dev_pm_ops, + }, +}; + +module_platform_driver(idma64_platform_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("iDMA64 core driver"); +MODULE_AUTHOR("Andy Shevchenko "); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h new file mode 100644 index 000000000..a4d99685a --- /dev/null +++ b/drivers/dma/idma64.h @@ -0,0 +1,233 @@ +/* + * Driver for the Intel integrated DMA 64-bit + * + * Copyright (C) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DMA_IDMA64_H__ +#define __DMA_IDMA64_H__ + +#include +#include +#include +#include + +#include "virt-dma.h" + +/* Channel registers */ + +#define IDMA64_CH_SAR 0x00 /* Source Address Register */ +#define IDMA64_CH_DAR 0x08 /* Destination Address Register */ +#define IDMA64_CH_LLP 0x10 /* Linked List Pointer */ +#define IDMA64_CH_CTL_LO 0x18 /* Control Register Low */ +#define IDMA64_CH_CTL_HI 0x1c /* Control Register High */ +#define IDMA64_CH_SSTAT 0x20 +#define IDMA64_CH_DSTAT 0x28 +#define IDMA64_CH_SSTATAR 0x30 +#define IDMA64_CH_DSTATAR 0x38 +#define IDMA64_CH_CFG_LO 0x40 /* Configuration Register Low */ +#define IDMA64_CH_CFG_HI 0x44 /* Configuration Register High */ +#define IDMA64_CH_SGR 0x48 +#define IDMA64_CH_DSR 0x50 + +#define IDMA64_CH_LENGTH 0x58 + +/* Bitfields in CTL_LO */ +#define IDMA64C_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/
+#define IDMA64C_CTLL_DST_WIDTH(x)	((x) << 1)	/* bytes per element */
+#define IDMA64C_CTLL_SRC_WIDTH(x)	((x) << 4)
+#define IDMA64C_CTLL_DST_INC		(0 << 8)	/* DAR update/not */
+#define IDMA64C_CTLL_DST_FIX		(1 << 8)
+#define IDMA64C_CTLL_SRC_INC		(0 << 10)	/* SAR update/not */
+#define IDMA64C_CTLL_SRC_FIX		(1 << 10)
+#define IDMA64C_CTLL_DST_MSIZE(x)	((x) << 11)	/* burst, #elements */
+#define IDMA64C_CTLL_SRC_MSIZE(x)	((x) << 14)
+#define IDMA64C_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+#define IDMA64C_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+#define IDMA64C_CTLL_LLP_D_EN		(1 << 27)	/* dest block chain */
+#define IDMA64C_CTLL_LLP_S_EN		(1 << 28)	/* src block chain */
+
+/* Bitfields in CTL_HI */
+#define IDMA64C_CTLH_BLOCK_TS(x)	((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_DONE		(1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA64C_CFGL_DST_BURST_ALIGN	(1 << 0)	/* dst burst align */
+#define IDMA64C_CFGL_SRC_BURST_ALIGN	(1 << 1)	/* src burst align */
+#define IDMA64C_CFGL_CH_SUSP		(1 << 8)
+#define IDMA64C_CFGL_FIFO_EMPTY		(1 << 9)
+#define IDMA64C_CFGL_CH_DRAIN		(1 << 10)	/* drain FIFO */
+#define IDMA64C_CFGL_DST_OPT_BL		(1 << 20)	/* optimize dst burst length */
+#define IDMA64C_CFGL_SRC_OPT_BL		(1 << 21)	/* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA64C_CFGH_SRC_PER(x)		((x) << 0)	/* src peripheral */
+#define IDMA64C_CFGH_DST_PER(x)		((x) << 4)	/* dst peripheral */
+#define IDMA64C_CFGH_RD_ISSUE_THD(x)	((x) << 8)
+#define IDMA64C_CFGH_RW_ISSUE_THD(x)	((x) << 18)
+
+/* Interrupt registers */
+
+#define IDMA64_INT_XFER		0x00
+#define IDMA64_INT_BLOCK	0x08
+#define IDMA64_INT_SRC_TRAN	0x10
+#define IDMA64_INT_DST_TRAN	0x18
+#define IDMA64_INT_ERROR	0x20
+
+#define IDMA64_RAW(x)		(0x2c0 + IDMA64_INT_##x)	/* r */
+#define IDMA64_STATUS(x)	(0x2e8 + IDMA64_INT_##x)	/* r (raw & mask) */
+#define IDMA64_MASK(x)		(0x310 + IDMA64_INT_##x)	/* rw (set = irq enabled) */
+#define IDMA64_CLEAR(x)		(0x338 + IDMA64_INT_##x)	/* w (ack, affects "raw") */
+
+/* Common registers */
+
+#define IDMA64_STATUS_INT	0x360	/* r */
+#define IDMA64_CFG		0x398
+#define IDMA64_CH_EN		0x3a0
+
+/* Bitfields in CFG */
+#define IDMA64_CFG_DMA_EN	(1 << 0)
+
+/* Hardware descriptor for Linked List transfers */
+struct idma64_lli {
+	u64		sar;
+	u64		dar;
+	u64		llp;
+	u32		ctllo;
+	u32		ctlhi;
+	u32		sstat;
+	u32		dstat;
+};
+
+struct idma64_hw_desc {
+	struct idma64_lli *lli;
+	dma_addr_t llp;
+	dma_addr_t phys;
+	unsigned int len;
+};
+
+struct idma64_desc {
+	struct virt_dma_desc vdesc;
+	enum dma_transfer_direction direction;
+	struct idma64_hw_desc *hw;
+	unsigned int ndesc;
+	size_t length;
+	enum dma_status status;
+};
+
+static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct idma64_desc, vdesc);
+}
+
+struct idma64_chan {
+	struct virt_dma_chan vchan;
+
+	void __iomem *regs;
+
+	/* hardware configuration */
+	enum dma_transfer_direction direction;
+	unsigned int mask;
+	struct dma_slave_config config;
+
+	void *pool;
+	struct idma64_desc *desc;
+};
+
+static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct idma64_chan, vchan.chan);
+}
+
+#define channel_set_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | 0)
+
+static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
+{
+	return readl(idma64c->regs + offset);
+}
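The channel_set_bit()/channel_clear_bit() macros above rely on the controller's self-masked register layout: as the (mask) << 8 term shows, the upper byte of CH_EN-style registers is a write-enable mask for the lower byte, so one write touches only the selected channels and no read-modify-write cycle is needed. An illustrative expansion for channel 2 (example values, not part of the patch):

/* Illustrative expansion of the self-masked write for channel 2 */
unsigned int mask = BIT(2);			/* channel 2 -> 0x04 */

dma_writel(idma64, CH_EN, (mask << 8) | mask);	/* writes 0x0404: enable  */
dma_writel(idma64, CH_EN, (mask << 8) | 0);	/* writes 0x0400: disable */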
+
+static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
+				  u32 value)
+{
+	writel(value, idma64c->regs + offset);
+}
+
+#define channel_readl(idma64c, reg)		\
+	idma64c_readl(idma64c, IDMA64_CH_##reg)
+#define channel_writel(idma64c, reg, value)	\
+	idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
+
+static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
+{
+	u64 l, h;
+
+	l = idma64c_readl(idma64c, offset);
+	h = idma64c_readl(idma64c, offset + 4);
+
+	return l | (h << 32);
+}
+
+static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
+				  u64 value)
+{
+	idma64c_writel(idma64c, offset, value);
+	idma64c_writel(idma64c, offset + 4, value >> 32);
+}
+
+#define channel_readq(idma64c, reg)		\
+	idma64c_readq(idma64c, IDMA64_CH_##reg)
+#define channel_writeq(idma64c, reg, value)	\
+	idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
+
+struct idma64 {
+	struct dma_device dma;
+
+	void __iomem *regs;
+
+	/* channels */
+	unsigned short all_chan_mask;
+	struct idma64_chan *chan;
+};
+
+static inline struct idma64 *to_idma64(struct dma_device *ddev)
+{
+	return container_of(ddev, struct idma64, dma);
+}
+
+static inline u32 idma64_readl(struct idma64 *idma64, int offset)
+{
+	return readl(idma64->regs + offset);
+}
+
+static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+{
+	writel(value, idma64->regs + offset);
+}
+
+#define dma_readl(idma64, reg)		\
+	idma64_readl(idma64, IDMA64_##reg)
+#define dma_writel(idma64, reg, value)	\
+	idma64_writel(idma64, IDMA64_##reg, (value))
+
+/**
+ * struct idma64_chip - representation of DesignWare DMA controller hardware
+ * @dev:		struct device of the DMA controller
+ * @irq:		irq line
+ * @regs:		memory mapped I/O space
+ * @idma64:		struct idma64 that is filled in by idma64_probe()
+ */
+struct idma64_chip {
+	struct device	*dev;
+	int		irq;
+	void __iomem	*regs;
+	struct idma64	*idma64;
+};
+
+#endif /* __DMA_IDMA64_H__ */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 865501fcc..48d85f8b9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1083,8 +1083,12 @@ static int __init imxdma_probe(struct platform_device *pdev)
 	if (IS_ERR(imxdma->dma_ahb))
 		return PTR_ERR(imxdma->dma_ahb);
 
-	clk_prepare_enable(imxdma->dma_ipg);
-	clk_prepare_enable(imxdma->dma_ahb);
+	ret = clk_prepare_enable(imxdma->dma_ipg);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(imxdma->dma_ahb);
+	if (ret)
+		goto disable_dma_ipg_clk;
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -1094,20 +1098,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
 				       dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		irq_err = platform_get_irq(pdev, 1);
 		if (irq_err < 0) {
 			ret = irq_err;
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		ret = devm_request_irq(&pdev->dev, irq_err,
 				       imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 	}
 
@@ -1144,7 +1148,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 				dev_warn(imxdma->dev, "Can't register IRQ %d "
 					 "for DMA channel %d\n",
 					 irq + i, i);
-				goto err;
+				goto disable_dma_ahb_clk;
 			}
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1183,14 +1187,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, imxdma);
 
-	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
+	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
 	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
 
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
-		goto err;
+		goto disable_dma_ahb_clk;
 	}
 
 	if (pdev->dev.of_node) {
@@ -1206,9 +1210,10 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 err_of_dma_controller:
 	dma_async_device_unregister(&imxdma->dma_device);
-err:
-	clk_disable_unprepare(imxdma->dma_ipg);
+disable_dma_ahb_clk:
 	clk_disable_unprepare(imxdma->dma_ahb);
+disable_dma_ipg_clk:
+	clk_disable_unprepare(imxdma->dma_ipg);
 
 	return ret;
 }
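The hunk above replaces unchecked clk_prepare_enable() calls with the usual goto-unwind pattern: resources are enabled in order and, on failure, released in reverse order, with each label undoing only what has already succeeded. A condensed sketch of the idiom (hypothetical "foo" driver, two clocks):

/* Condensed sketch of the unwind idiom used above (hypothetical driver) */
static int foo_probe(struct foo_dev *foo)
{
	int ret;

	ret = clk_prepare_enable(foo->clk_ipg);
	if (ret)
		return ret;			/* nothing enabled yet */

	ret = clk_prepare_enable(foo->clk_ahb);
	if (ret)
		goto disable_ipg;		/* undo only clk_ipg */

	ret = foo_register(foo);		/* hypothetical final step */
	if (ret)
		goto disable_ahb;

	return 0;

disable_ahb:
	clk_disable_unprepare(foo->clk_ahb);	/* reverse order of enabling */
disable_ipg:
	clk_disable_unprepare(foo->clk_ipg);
	return ret;
}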
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 47e126c58..97eaa32d8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,12 +35,16 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
+#include
 
 #include "dmaengine.h"
 
@@ -123,6 +127,56 @@
  */
 #define CHANGE_ENDIANNESS   0x80
 
+/*
+ *  p_2_p watermark_level description
+ *	Bits		Name			Description
+ *	0-7		Lower WML		Lower watermark level
+ *	8		PS			1: Pad Swallowing
+ *						0: No Pad Swallowing
+ *	9		PA			1: Pad Adding
+ *						0: No Pad Adding
+ *	10		SPDIF			If this bit is set both source
+ *						and destination are on SPBA
+ *	11		Source Bit(SP)		1: Source on SPBA
+ *						0: Source on AIPS
+ *	12		Destination Bit(DP)	1: Destination on SPBA
+ *						0: Destination on AIPS
+ *	13-15		---------		MUST BE 0
+ *	16-23		Higher WML		HWML
+ *	24-27		N			Total number of samples after
+ *						which Pad adding/Swallowing
+ *						must be done. It must be odd.
+ *	28		Lower WML Event(LWE)	SDMA events reg to check for
+ *						LWML event mask
+ *						0: LWE in EVENTS register
+ *						1: LWE in EVENTS2 register
+ *	29		Higher WML Event(HWE)	SDMA events reg to check for
+ *						HWML event mask
+ *						0: HWE in EVENTS register
+ *						1: HWE in EVENTS2 register
+ *	30		---------		MUST BE 0
+ *	31		CONT			1: Amount of samples to be
+ *						transferred is unknown and
+ *						script will keep on
+ *						transferring samples as long as
+ *						both events are detected and
+ *						script must be manually stopped
+ *						by the application
+ *						0: The amount of samples to be
+ *						transferred is equal to the
+ *						count field of mode word
+ */
+#define SDMA_WATERMARK_LEVEL_LWML	0xFF
+#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
+#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
+#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
+#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
+#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
+#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
+#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
+#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
+#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
+
 /*
  * Mode/Count of data node descriptors - IPCv2
  */
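A worked example of this packing (illustrative only, mirroring what sdma_config() and sdma_set_watermarklevel_for_p2p() below do for src_maxburst = 4 and dst_maxburst = 8 with both FIFOs on SPBA):

/* Illustrative packing of a p2p watermark_level (not part of the patch) */
u32 wml = 0;

wml |= 4 & SDMA_WATERMARK_LEVEL_LWML;		/* bits 0-7:   LWML = 4 */
wml |= (8 << 16) & SDMA_WATERMARK_LEVEL_HWML;	/* bits 16-23: HWML = 8 */
wml |= SDMA_WATERMARK_LEVEL_SP;			/* bit 11: source on SPBA */
wml |= SDMA_WATERMARK_LEVEL_DP;			/* bit 12: destination on SPBA */
wml |= SDMA_WATERMARK_LEVEL_CONT;		/* bit 31: run until stopped */

/* wml == 0x80081804 */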
@@ -259,8 +313,9 @@ struct sdma_channel {
 	struct sdma_buffer_descriptor	*bd;
 	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
+	unsigned int			device_to_device;
 	unsigned long			flags;
-	dma_addr_t			per_address;
+	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -328,6 +383,8 @@ struct sdma_engine {
 	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
+	u32				spba_start_addr;
+	u32				spba_end_addr;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -705,6 +762,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->device_to_device = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -780,6 +838,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->device_to_device = per_2_per;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -792,11 +851,12 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_DEV_TO_DEV)
+		load_address = sdmac->device_to_device;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -851,6 +911,46 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	return 0;
 }
 
+static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+
+	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
+	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
+
+	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
+	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
+
+	if (sdmac->event_id0 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
+
+	if (sdmac->event_id1 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
+
+	/*
+	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to
+	 * swap LWML and HWML in INFO(A.3.2.5.1), and also swap
+	 * r0(event_mask[1]) and r1(event_mask[0]).
+	 */
+	if (lwml > hwml) {
+		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
+						SDMA_WATERMARK_LEVEL_HWML);
+		sdmac->watermark_level |= hwml;
+		sdmac->watermark_level |= lwml << 16;
+		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
+	}
+
+	if (sdmac->per_address2 >= sdma->spba_start_addr &&
+			sdmac->per_address2 <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
+
+	if (sdmac->per_address >= sdma->spba_start_addr &&
+			sdmac->per_address <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
+
+	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+}
+
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -869,6 +969,12 @@ static int sdma_config_channel(struct dma_chan *chan)
 		sdma_event_enable(sdmac, sdmac->event_id0);
 	}
 
+	if (sdmac->event_id1) {
+		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id1);
+	}
+
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -887,19 +993,17 @@
 	    (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 		/* Handle multiple event channels differently */
 		if (sdmac->event_id1) {
-			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
-			if (sdmac->event_id1 > 31)
-				__set_bit(31, &sdmac->watermark_level);
-			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
-			if (sdmac->event_id0 > 31)
-				__set_bit(30, &sdmac->watermark_level);
-		} else {
+			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
+			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
+				sdma_set_watermarklevel_for_p2p(sdmac);
+		} else
 			__set_bit(sdmac->event_id0, sdmac->event_mask);
-		}
+
 		/* Watermark Level */
 		sdmac->watermark_level |= sdmac->watermark_level;
 		/* Address */
 		sdmac->shp_addr = sdmac->per_address;
+		sdmac->per_addr = sdmac->per_address2;
 	} else {
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
@@ -987,17 +1091,22 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	sdmac->peripheral_type =
data->peripheral_type; sdmac->event_id0 = data->dma_request; + sdmac->event_id1 = data->dma_request2; - clk_enable(sdmac->sdma->clk_ipg); - clk_enable(sdmac->sdma->clk_ahb); + ret = clk_enable(sdmac->sdma->clk_ipg); + if (ret) + return ret; + ret = clk_enable(sdmac->sdma->clk_ahb); + if (ret) + goto disable_clk_ipg; ret = sdma_request_channel(sdmac); if (ret) - return ret; + goto disable_clk_ahb; ret = sdma_set_channel_priority(sdmac, prio); if (ret) - return ret; + goto disable_clk_ahb; dma_async_tx_descriptor_init(&sdmac->desc, chan); sdmac->desc.tx_submit = sdma_tx_submit; @@ -1005,6 +1114,12 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) sdmac->desc.flags = DMA_CTRL_ACK; return 0; + +disable_clk_ahb: + clk_disable(sdmac->sdma->clk_ahb); +disable_clk_ipg: + clk_disable(sdmac->sdma->clk_ipg); + return ret; } static void sdma_free_chan_resources(struct dma_chan *chan) @@ -1221,6 +1336,14 @@ static int sdma_config(struct dma_chan *chan, sdmac->watermark_level = dmaengine_cfg->src_maxburst * dmaengine_cfg->src_addr_width; sdmac->word_size = dmaengine_cfg->src_addr_width; + } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) { + sdmac->per_address2 = dmaengine_cfg->src_addr; + sdmac->per_address = dmaengine_cfg->dst_addr; + sdmac->watermark_level = dmaengine_cfg->src_maxburst & + SDMA_WATERMARK_LEVEL_LWML; + sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) & + SDMA_WATERMARK_LEVEL_HWML; + sdmac->word_size = dmaengine_cfg->dst_addr_width; } else { sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->watermark_level = dmaengine_cfg->dst_maxburst * @@ -1337,6 +1460,72 @@ err_firmware: release_firmware(fw); } +#define EVENT_REMAP_CELLS 3 + +static int __init sdma_event_remap(struct sdma_engine *sdma) +{ + struct device_node *np = sdma->dev->of_node; + struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); + struct property *event_remap; + struct regmap *gpr; + char propname[] = "fsl,sdma-event-remap"; + u32 reg, val, shift, num_map, i; + int ret = 0; + + if (IS_ERR(np) || IS_ERR(gpr_np)) + goto out; + + event_remap = of_find_property(np, propname, NULL); + num_map = event_remap ? 
(event_remap->length / sizeof(u32)) : 0;
+	if (!num_map) {
+		dev_warn(sdma->dev, "no event needs to be remapped\n");
+		goto out;
+	} else if (num_map % EVENT_REMAP_CELLS) {
+		dev_err(sdma->dev, "the length of property %s must be a multiple of %d\n",
+			propname, EVENT_REMAP_CELLS);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	gpr = syscon_node_to_regmap(gpr_np);
+	if (IS_ERR(gpr)) {
+		dev_err(sdma->dev, "failed to get gpr regmap\n");
+		ret = PTR_ERR(gpr);
+		goto out;
+	}
+
+	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
+		ret = of_property_read_u32_index(np, propname, i, &reg);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+				propname, i);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+				propname, i + 1);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 2, &val);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+				propname, i + 2);
+			goto out;
+		}
+
+		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
+	}
+
+out:
+	if (!IS_ERR(gpr_np))
+		of_node_put(gpr_np);
+
+	return ret;
+}
+
 static int sdma_get_firmware(struct sdma_engine *sdma,
 		const char *fw_name)
 {
@@ -1354,8 +1543,12 @@ static int sdma_init(struct sdma_engine *sdma)
 	int i, ret;
 	dma_addr_t ccb_phys;
 
-	clk_enable(sdma->clk_ipg);
-	clk_enable(sdma->clk_ahb);
+	ret = clk_enable(sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1411,8 +1604,9 @@ static int sdma_init(struct sdma_engine *sdma)
 	return 0;
 
 err_dma_alloc:
-	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdma->clk_ipg);
 	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
 	return ret;
 }
@@ -1444,6 +1638,14 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
 	data.dma_request = dma_spec->args[0];
 	data.peripheral_type = dma_spec->args[1];
 	data.priority = dma_spec->args[2];
+	/*
+	 * init dma_request2 to zero, which is not used by the dts.
+	 * For P2P, dma_request2 is initialized from dma_request_channel():
+	 * chan->private will point to the imx_dma_data, and in
+	 * device_alloc_chan_resources(), imx_dma_data.dma_request2 will
+	 * be copied into sdmac->event_id1.
+ */ + data.dma_request2 = 0; return dma_request_channel(mask, sdma_filter_fn, &data); } @@ -1453,10 +1655,12 @@ static int sdma_probe(struct platform_device *pdev) const struct of_device_id *of_id = of_match_device(sdma_dt_ids, &pdev->dev); struct device_node *np = pdev->dev.of_node; + struct device_node *spba_bus; const char *fw_name; int ret; int irq; struct resource *iores; + struct resource spba_res; struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); int i; struct sdma_engine *sdma; @@ -1551,6 +1755,10 @@ static int sdma_probe(struct platform_device *pdev) if (ret) goto err_init; + ret = sdma_event_remap(sdma); + if (ret) + goto err_init; + if (sdma->drvdata->script_addrs) sdma_add_scripts(sdma, sdma->drvdata->script_addrs); if (pdata && pdata->script_addrs) @@ -1608,6 +1816,14 @@ static int sdma_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register controller\n"); goto err_register; } + + spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus"); + ret = of_address_to_resource(spba_bus, 0, &spba_res); + if (!ret) { + sdma->spba_start_addr = spba_res.start; + sdma->spba_end_addr = spba_res.end; + } + of_node_put(spba_bus); } dev_info(sdma->dev, "initialized\n"); diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 0ff7270af..cf5fedbe2 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o +ioatdma-y := init.o dma.o prep.o dca.o sysfs.o diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index ea1e107ae..2cb7c308d 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -31,7 +31,6 @@ #include "dma.h" #include "registers.h" -#include "dma_v2.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 @@ -71,14 +70,6 @@ static inline int dca2_tag_map_valid(u8 *tag_map) #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) #define IOAT_TAG_MAP_LEN 8 -static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; -static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; - /* pack PCI B/D/F into a u16 */ static inline u16 dcaid_from_pcidev(struct pci_dev *pci) { @@ -126,96 +117,6 @@ struct ioat_dca_priv { struct ioat_dca_slot req_slots[0]; }; -/* 5000 series chipset DCA Port Requester ID Table Entry Format - * [15:8] PCI-Express Bus Number - * [7:3] PCI-Express Device Number - * [2:0] PCI-Express Function Number - * - * 5000 series chipset DCA control register format - * [7:1] Reserved (0) - * [0] Ignore Function Number - */ - -static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - writew(id, ioatdca->dca_base + (i * 4)); - /* make sure the ignore 
function bit is off */ - writeb(0, ioatdca->dca_base + (i * 4) + 2); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - writew(0, ioatdca->dca_base + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - int i, apic_id, bit, value; - u8 entry, tag; - - tag = 0; - apic_id = cpu_physical_id(cpu); - - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { - entry = ioatdca->tag_map[i]; - if (entry & DCA_TAG_MAP_VALID) { - bit = entry & ~DCA_TAG_MAP_VALID; - value = (apic_id & (1 << bit)) ? 1 : 0; - } else { - value = entry ? 1 : 0; - } - tag |= (value << i); - } - return tag; -} - static int ioat_dca_dev_managed(struct dca_provider *dca, struct device *dev) { @@ -231,260 +132,7 @@ static int ioat_dca_dev_managed(struct dca_provider *dca, return 0; } -static struct dca_ops ioat_dca_ops = { - .add_requester = ioat_dca_add_requester, - .remove_requester = ioat_dca_remove_requester, - .get_tag = ioat_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - - -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - u8 *tag_map = NULL; - int i; - int err; - u8 version; - u8 max_requesters; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - /* I/OAT v1 systems must have a known tag_map to support DCA */ - switch (pdev->vendor) { - case PCI_VENDOR_ID_INTEL: - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT: - tag_map = ioat_tag_map_BNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_CNB: - tag_map = ioat_tag_map_CNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_SCNB: - tag_map = ioat_tag_map_SCNB; - break; - } - break; - case PCI_VENDOR_ID_UNISYS: - switch (pdev->device) { - case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: - tag_map = ioat_tag_map_UNISYS; - break; - } - break; - } - if (tag_map == NULL) - return NULL; - - version = readb(iobase + IOAT_VER_OFFSET); - if (version == IOAT_VER_3_0) - max_requesters = IOAT3_DCA_MAX_REQ; - else - max_requesters = IOAT_DCA_MAX_REQ; - - dca = alloc_dca_provider(&ioat_dca_ops, - sizeof(*ioatdca) + - (sizeof(struct ioat_dca_slot) * max_requesters)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->max_requesters = max_requesters; - ioatdca->dca_base = iobase + 0x54; - - /* copy over the APIC ID to DCA tag mapping */ - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) - ioatdca->tag_map[i] = tag_map[i]; - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - - -static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count 
== ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(id | IOAT_DCA_GREQID_VALID, - ioatdca->iobase + global_req_table + (i * 4)); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat2_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(0, ioatdca->iobase + global_req_table + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat2_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - u8 tag; - - tag = ioat_dca_get_tag(dca, dev, cpu); - tag = (~tag) & 0x1F; - return tag; -} - -static struct dca_ops ioat2_dca_ops = { - .add_requester = ioat2_dca_add_requester, - .remove_requester = ioat2_dca_remove_requester, - .get_tag = ioat2_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - -static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) -{ - int slots = 0; - u32 req; - u16 global_req_table; - - global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); - if (global_req_table == 0) - return 0; - do { - req = readl(iobase + global_req_table + (slots * sizeof(u32))); - slots++; - } while ((req & IOAT_DCA_GREQID_LASTID) == 0); - - return slots; -} - -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - int slots; - int i; - int err; - u32 tag_map; - u16 dca_offset; - u16 csi_fsb_control; - u16 pcie_control; - u8 bit; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); - if (dca_offset == 0) - return NULL; - - slots = ioat2_dca_count_dca_slots(iobase, dca_offset); - if (slots == 0) - return NULL; - - dca = alloc_dca_provider(&ioat2_dca_ops, - sizeof(*ioatdca) - + (sizeof(struct ioat_dca_slot) * slots)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->iobase = iobase; - ioatdca->dca_base = iobase + dca_offset; - ioatdca->max_requesters = slots; - - /* some bios might not know to turn these on */ - csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { - csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; - writew(csi_fsb_control, - ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - } - pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { - pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; - writew(pcie_control, - ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - } - - - /* TODO version, compatibility and configuration checks */ - - /* copy out the APIC to DCA tag map */ - tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); - for 
(i = 0; i < 5; i++) { - bit = (tag_map >> (4 * i)) & 0x0f; - if (bit < 8) - ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; - else - ioatdca->tag_map[i] = 0; - } - - if (!dca2_tag_map_valid(ioatdca->tag_map)) { - WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, - "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", - dev_driver_string(&pdev->dev), - dev_name(&pdev->dev)); - free_dca_provider(dca); - return NULL; - } - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - -static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) +static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; @@ -518,7 +166,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) return -EFAULT; } -static int ioat3_dca_remove_requester(struct dca_provider *dca, +static int ioat_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); @@ -545,7 +193,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca, return -ENODEV; } -static u8 ioat3_dca_get_tag(struct dca_provider *dca, +static u8 ioat_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { @@ -576,14 +224,14 @@ static u8 ioat3_dca_get_tag(struct dca_provider *dca, return tag; } -static struct dca_ops ioat3_dca_ops = { - .add_requester = ioat3_dca_add_requester, - .remove_requester = ioat3_dca_remove_requester, - .get_tag = ioat3_dca_get_tag, +static struct dca_ops ioat_dca_ops = { + .add_requester = ioat_dca_add_requester, + .remove_requester = ioat_dca_remove_requester, + .get_tag = ioat_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; -static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) +static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset) { int slots = 0; u32 req; @@ -618,7 +266,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map) (tag_map[4] == DCA_TAG_MAP_VALID)); } -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -645,11 +293,11 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) if (dca_offset == 0) return NULL; - slots = ioat3_dca_count_dca_slots(iobase, dca_offset); + slots = ioat_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; - dca = alloc_dca_provider(&ioat3_dca_ops, + dca = alloc_dca_provider(&ioat_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index ee0aa9f4c..f66b7e640 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -1,6 +1,6 @@ /* * Intel I/OAT DMA Linux driver - * Copyright(c) 2004 - 2009 Intel Corporation. + * Copyright(c) 2004 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -31,31 +31,23 @@ #include #include #include -#include #include "dma.h" #include "registers.h" #include "hw.h" #include "../dmaengine.h" -int ioat_pending_level = 4; -module_param(ioat_pending_level, int, 0644); -MODULE_PARM_DESC(ioat_pending_level, - "high-water mark for pushing ioat descriptors (default: 4)"); - -/* internal functions */ -static void ioat1_cleanup(struct ioat_dma_chan *ioat); -static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); +static void ioat_eh(struct ioatdma_chan *ioat_chan); /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode * @irq: interrupt id * @data: interrupt data */ -static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) +irqreturn_t ioat_dma_do_interrupt(int irq, void *data) { struct ioatdma_device *instance = data; - struct ioat_chan_common *chan; + struct ioatdma_chan *ioat_chan; unsigned long attnstatus; int bit; u8 intrctrl; @@ -72,9 +64,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { - chan = ioat_chan_by_index(instance, bit); - if (test_bit(IOAT_RUN, &chan->state)) - tasklet_schedule(&chan->cleanup_task); + ioat_chan = ioat_chan_by_index(instance, bit); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + tasklet_schedule(&ioat_chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -86,1161 +78,912 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) * @irq: interrupt id * @data: interrupt data */ -static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) +irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { - struct ioat_chan_common *chan = data; + struct ioatdma_chan *ioat_chan = data; - if (test_bit(IOAT_RUN, &chan->state)) - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + tasklet_schedule(&ioat_chan->cleanup_task); return IRQ_HANDLED; } -/* common channel initialization */ -void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) +void ioat_stop(struct ioatdma_chan *ioat_chan) { - struct dma_device *dma = &device->common; - struct dma_chan *c = &chan->common; - unsigned long data = (unsigned long) c; - - chan->device = device; - chan->reg_base = device->reg_base + (0x80 * (idx + 1)); - spin_lock_init(&chan->cleanup_lock); - chan->common.device = dma; - dma_cookie_init(&chan->common); - list_add_tail(&chan->common.device_node, &dma->channels); - device->idx[idx] = chan; - init_timer(&chan->timer); - chan->timer.function = device->timer_fn; - chan->timer.data = data; - tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct pci_dev *pdev = ioat_dma->pdev; + int chan_id = chan_num(ioat_chan); + struct msix_entry *msix; + + /* 1/ stop irq from firing tasklets + * 2/ stop the tasklet from re-arming irqs + */ + clear_bit(IOAT_RUN, &ioat_chan->state); + + /* flush inflight interrupts */ + switch (ioat_dma->irq_mode) { + case IOAT_MSIX: + msix = &ioat_dma->msix_entries[chan_id]; + synchronize_irq(msix->vector); + break; + case IOAT_MSI: + case IOAT_INTX: + synchronize_irq(pdev->irq); + break; + default: + break; + } + + /* flush inflight timers */ + del_timer_sync(&ioat_chan->timer); + + /* flush inflight tasklet runs */ + 
tasklet_kill(&ioat_chan->cleanup_task);
+
+	/* final cleanup now that everything is quiesced and can't re-arm */
+	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
 }
 
-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
+static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_scale &= 0x1f; /* bits [4:0] valid */
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
+	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+	ioat_chan->issued = ioat_chan->head;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+}
+
+void ioat_issue_pending(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap = xfercap;
-		spin_lock_init(&ioat->desc_lock);
-		INIT_LIST_HEAD(&ioat->free_desc);
-		INIT_LIST_HEAD(&ioat->used_desc);
+	if (ioat_ring_pending(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		__ioat_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
 	}
-	dma->chancnt = i;
-	return i;
 }
 
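The bookkeeping above relies on three free-running indices into a power-of-2 descriptor ring: head (next slot software preps), issued (how far hardware has been kicked), and tail (next slot to clean up), with occupancy derived from wraparound subtraction. A standalone sketch of the math (not the driver's actual helpers, which use CIRC_CNT(); field and function names here are illustrative):

/* Standalone sketch of the ring occupancy math behind head/tail/issued */
struct ring {
	u16 head;	/* next slot to be prepped by software */
	u16 issued;	/* marks how far hardware has been kicked */
	u16 tail;	/* next slot to be cleaned up after completion */
	u16 order;	/* ring size is 1 << order */
};

static inline u16 ring_size(const struct ring *r)
{
	return 1 << r->order;
}

static inline u16 ring_active(const struct ring *r)	/* prepped, not yet cleaned */
{
	return (r->head - r->tail) & (ring_size(r) - 1);
}

static inline u16 ring_pending(const struct ring *r)	/* prepped, not yet issued */
{
	return (r->head - r->issued) & (ring_size(r) - 1);
}

static inline u16 ring_space(const struct ring *r)	/* free slots */
{
	return ring_size(r) - ring_active(r);
}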
 /**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
+ * ioat_update_pending - log pending descriptors
+ * @ioat_chan: ioat channel
+ *
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with prep_lock held
  */
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
 {
-	void __iomem *reg_base = ioat->base.reg_base;
-
-	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
-		__func__, ioat->pending);
-	ioat->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
+	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+		__ioat_issue_pending(ioat_chan);
 }
 
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
 
-	if (ioat->pending > 0) {
-		spin_lock_bh(&ioat->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat);
-		spin_unlock_bh(&ioat->desc_lock);
+	if (ioat_ring_space(ioat_chan) < 1) {
+		dev_err(to_dev(ioat_chan),
+			"Unable to start null desc - ring full\n");
+		return;
 	}
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.compl_write = 1;
+	/* set size to non-zero value (channel returns error when size is 0) */
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+	async_tx_ack(&desc->txd);
+	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+	dump_desc_dbg(ioat_chan, desc);
+	/* make sure descriptors are written before we submit */
+	wmb();
+	ioat_chan->head += 1;
+	__ioat_issue_pending(ioat_chan);
 }
 
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_chan_common *chan = &ioat->base;
-	void __iomem *reg_base = chan->reg_base;
-	u32 chansts, chanerr;
-
-	dev_warn(to_dev(chan), "reset\n");
-	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
-	if (chanerr) {
-		dev_err(to_dev(chan),
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(chan), chansts, chanerr);
-		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	__ioat_start_null_desc(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
+static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
+{
+	/* set the tail to be re-issued */
+	ioat_chan->issued = ioat_chan->tail;
+	ioat_chan->dmacount = 0;
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+		__ioat_issue_pending(ioat_chan);
+	} else
+		__ioat_start_null_desc(ioat_chan);
+}
+
+static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+	u32 status;
+
+	status = ioat_chansts(ioat_chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat_chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (tmo && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		status
= ioat_chansts(ioat_chan); + cpu_relax(); } - /* - * whack it upside the head with a reset - * and wait for things to settle out. - * force the pending count to a really big negative - * to make sure no one forces an issue_pending - * while we're waiting. - */ + return err; +} - ioat->pending = INT_MIN; - writeb(IOAT_CHANCMD_RESET, - reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - set_bit(IOAT_RESET_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + RESET_DELAY); +static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) +{ + unsigned long end = jiffies + tmo; + int err = 0; + + ioat_reset(ioat_chan); + while (ioat_reset_pending(ioat_chan)) { + if (end && time_after(jiffies, end)) { + err = -ETIMEDOUT; + break; + } + cpu_relax(); + } + + return err; } -static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) +static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx) + __releases(&ioat_chan->prep_lock) { struct dma_chan *c = tx->chan; - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *first; - struct ioat_desc_sw *chain_tail; + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); dma_cookie_t cookie; - spin_lock_bh(&ioat->desc_lock); - /* cookie incr and addition to used_list must be atomic */ cookie = dma_cookie_assign(tx); - dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie); + + if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - /* write address into NextDescriptor field of last desc in chain */ - first = to_ioat_desc(desc->tx_list.next); - chain_tail = to_ioat_desc(ioat->used_desc.prev); - /* make descriptor updates globally visible before chaining */ + /* make descriptor updates visible before advancing ioat->head, + * this is purposefully not smp_wmb() since we are also + * publishing the descriptor updates to a dma device + */ wmb(); - chain_tail->hw->next = first->txd.phys; - list_splice_tail_init(&desc->tx_list, &ioat->used_desc); - dump_desc_dbg(ioat, chain_tail); - dump_desc_dbg(ioat, first); - if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat_chan->head += ioat_chan->produce; - ioat->active += desc->hw->tx_cnt; - ioat->pending += desc->hw->tx_cnt; - if (ioat->pending >= ioat_pending_level) - __ioat1_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); + ioat_update_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); return cookie; } -/** - * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair - * @ioat: the channel supplying the memory pool for the descriptors - * @flags: allocation flags - */ -static struct ioat_desc_sw * -ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) +static struct ioat_ring_ent * +ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) { - struct ioat_dma_descriptor *desc; - struct ioat_desc_sw *desc_sw; - struct ioatdma_device *ioatdma_device; + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + struct ioatdma_device *ioat_dma; dma_addr_t phys; - ioatdma_device = ioat->base.device; - desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); - if (unlikely(!desc)) + ioat_dma = to_ioatdma_device(chan->device); + hw = pci_pool_alloc(ioat_dma->dma_pool, flags, 
&phys); + if (!hw) return NULL; + memset(hw, 0, sizeof(*hw)); - desc_sw = kzalloc(sizeof(*desc_sw), flags); - if (unlikely(!desc_sw)) { - pci_pool_free(ioatdma_device->dma_pool, desc, phys); + desc = kmem_cache_zalloc(ioat_cache, flags); + if (!desc) { + pci_pool_free(ioat_dma->dma_pool, hw, phys); return NULL; } - memset(desc, 0, sizeof(*desc)); + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = ioat_tx_submit_unlock; + desc->hw = hw; + desc->txd.phys = phys; + return desc; +} - INIT_LIST_HEAD(&desc_sw->tx_list); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); - desc_sw->txd.tx_submit = ioat1_tx_submit; - desc_sw->hw = desc; - desc_sw->txd.phys = phys; - set_desc_id(desc_sw, -1); +void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) +{ + struct ioatdma_device *ioat_dma; - return desc_sw; + ioat_dma = to_ioatdma_device(chan->device); + pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys); + kmem_cache_free(ioat_cache, desc); } -static int ioat_initial_desc_count = 256; -module_param(ioat_initial_desc_count, int, 0644); -MODULE_PARM_DESC(ioat_initial_desc_count, - "ioat1: initial descriptors per channel (default: 256)"); -/** - * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors - * @chan: the channel to be filled out - */ -static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) +struct ioat_ring_ent ** +ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *desc; - u32 chanerr; + struct ioat_ring_ent **ring; + int descs = 1 << order; int i; - LIST_HEAD(tmp_list); - - /* have we already been set up? */ - if (!list_empty(&ioat->free_desc)) - return ioat->desccount; - /* Setup register to interrupt and write completion status on error */ - writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); + if (order > ioat_get_max_alloc_order()) + return NULL; - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - if (chanerr) { - dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), flags); + if (!ring) + return NULL; + for (i = 0; i < descs; i++) { + ring[i] = ioat_alloc_ring_ent(c, flags); + if (!ring[i]) { + while (i--) + ioat_free_ring_ent(ring[i], c); + kfree(ring); + return NULL; + } + set_desc_id(ring[i], i); } - /* Allocate descriptors */ - for (i = 0; i < ioat_initial_desc_count; i++) { - desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); - if (!desc) { - dev_err(to_dev(chan), "Only %d initial descriptors\n", i); - break; - } - set_desc_id(desc, i); - list_add_tail(&desc->node, &tmp_list); + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; } - spin_lock_bh(&ioat->desc_lock); - ioat->desccount = i; - list_splice(&tmp_list, &ioat->free_desc); - spin_unlock_bh(&ioat->desc_lock); - - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, &chan->completion_dma); - memset(chan->completion, 0, sizeof(*chan->completion)); - writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) 
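The allocation in ioat_alloc_ring_ent() above is two-level: the hardware descriptor comes from a DMA-coherent pool (pci_pool_* is a thin wrapper over dma_pool_*) so the engine can walk it, while the ioat_ring_ent wrapper comes from an ordinary slab cache. A stand-alone sketch of that pairing, with hypothetical names (my_desc, my_hw_desc, my_alloc_desc are illustration only, not driver API):

	/* Pair a device-visible descriptor with a host-side wrapper;
	 * unwind in reverse order on the error path.
	 * Needs <linux/dmapool.h> and <linux/slab.h>.
	 */
	struct my_hw_desc { u64 next; /* ... device-defined fields ... */ };

	struct my_desc {
		struct my_hw_desc *hw;	/* DMA-coherent, device reads this */
		dma_addr_t phys;	/* bus address of 'hw' */
	};

	static struct my_desc *my_alloc_desc(struct dma_pool *pool,
					     struct kmem_cache *cache,
					     gfp_t flags)
	{
		dma_addr_t phys;
		struct my_desc *desc;
		struct my_hw_desc *hw = dma_pool_alloc(pool, flags, &phys);

		if (!hw)
			return NULL;
		memset(hw, 0, sizeof(*hw));

		desc = kmem_cache_zalloc(cache, flags);
		if (!desc) {
			dma_pool_free(pool, hw, phys);
			return NULL;
		}
		desc->hw = hw;
		desc->phys = phys;
		return desc;
	}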
chan->completion_dma) >> 32, - chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - set_bit(IOAT_RUN, &chan->state); - ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ - dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", - __func__, ioat->desccount); - return ioat->desccount; + ring[i]->hw->next = ring[0]->txd.phys; + + return ring; } -void ioat_stop(struct ioat_chan_common *chan) +static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) { - struct ioatdma_device *device = chan->device; - struct pci_dev *pdev = device->pdev; - int chan_id = chan_num(chan); - struct msix_entry *msix; + /* reshape differs from normal ring allocation in that we want + * to allocate a new software ring while only + * extending/truncating the hardware ring + */ + struct dma_chan *c = &ioat_chan->dma_chan; + const u32 curr_size = ioat_ring_size(ioat_chan); + const u16 active = ioat_ring_active(ioat_chan); + const u32 new_size = 1 << order; + struct ioat_ring_ent **ring; + u32 i; + + if (order > ioat_get_max_alloc_order()) + return false; - /* 1/ stop irq from firing tasklets - * 2/ stop the tasklet from re-arming irqs + /* double check that we have at least 1 free descriptor */ + if (active == curr_size) + return false; + + /* when shrinking, verify that we can hold the current active + * set in the new ring */ - clear_bit(IOAT_RUN, &chan->state); + if (active >= new_size) + return false; - /* flush inflight interrupts */ - switch (device->irq_mode) { - case IOAT_MSIX: - msix = &device->msix_entries[chan_id]; - synchronize_irq(msix->vector); - break; - case IOAT_MSI: - case IOAT_INTX: - synchronize_irq(pdev->irq); - break; - default: - break; - } + /* allocate the array to hold the software ring */ + ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); + if (!ring) + return false; - /* flush inflight timers */ - del_timer_sync(&chan->timer); + /* allocate/trim descriptors as needed */ + if (new_size > curr_size) { + /* copy current descriptors to the new ring */ + for (i = 0; i < curr_size; i++) { + u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1); + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); - /* flush inflight tasklet runs */ - tasklet_kill(&chan->cleanup_task); + ring[new_idx] = ioat_chan->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } - /* final cleanup now that everything is quiesced and can't re-arm */ - device->cleanup_fn((unsigned long) &chan->common); -} + /* add new descriptors to the ring */ + for (i = curr_size; i < new_size; i++) { + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); -/** - * ioat1_dma_free_chan_resources - release all the descriptors - * @chan: the channel to be cleaned - */ -static void ioat1_dma_free_chan_resources(struct dma_chan *c) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *ioatdma_device = chan->device; - struct ioat_desc_sw *desc, *_desc; - int in_use_descs = 0; - - /* Before freeing channel resources first check - * if they have been previously allocated for this channel. 
- */ - if (ioat->desccount == 0) - return; + ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT); + if (!ring[new_idx]) { + while (i--) { + u16 new_idx = (ioat_chan->tail+i) & + (new_size-1); + + ioat_free_ring_ent(ring[new_idx], c); + } + kfree(ring); + return false; + } + set_desc_id(ring[new_idx], new_idx); + } - ioat_stop(chan); + /* hw link new descriptors */ + for (i = curr_size-1; i < new_size; i++) { + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); + struct ioat_ring_ent *next = + ring[(new_idx+1) & (new_size-1)]; + struct ioat_dma_descriptor *hw = ring[new_idx]->hw; - /* Delay 100ms after reset to allow internal DMA logic to quiesce - * before removing DMA descriptor resources. - */ - writeb(IOAT_CHANCMD_RESET, - chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - mdelay(100); - - spin_lock_bh(&ioat->desc_lock); - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { - dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", - __func__, desc_id(desc)); - dump_desc_dbg(ioat, desc); - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, - &ioat->free_desc, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); + hw->next = next->txd.phys; + } + } else { + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *next; + + /* copy current descriptors to the new ring, dropping the + * removed descriptors + */ + for (i = 0; i < new_size; i++) { + u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1); + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); + + ring[new_idx] = ioat_chan->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* free deleted descriptors */ + for (i = new_size; i < curr_size; i++) { + struct ioat_ring_ent *ent; + + ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i); + ioat_free_ring_ent(ent, c); + } + + /* fix up hardware ring */ + hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw; + next = ring[(ioat_chan->tail+new_size) & (new_size-1)]; + hw->next = next->txd.phys; } - spin_unlock_bh(&ioat->desc_lock); - pci_pool_free(ioatdma_device->completion_pool, - chan->completion, - chan->completion_dma); + dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n", + __func__, new_size); - /* one is ok since we left it on there on purpose */ - if (in_use_descs > 1) - dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", - in_use_descs - 1); + kfree(ioat_chan->ring); + ioat_chan->ring = ring; + ioat_chan->alloc_order = order; - chan->last_completion = 0; - chan->completion_dma = 0; - ioat->pending = 0; - ioat->desccount = 0; + return true; } /** - * ioat1_dma_get_next_descriptor - return the next available descriptor - * @ioat: IOAT DMA channel handle - * - * Gets the next descriptor from the chain, and must be called with the - * channel's desc_lock held. Allocates more descriptors if the channel - * has run out. 
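reshape_ring() above keeps every surviving descriptor at the same distance from tail while only the power-of-two mask changes, which is what preserves completion order across a resize; the shrink branch frees the slots beyond the new size only after the survivors have been copied. The index arithmetic in isolation, as a minimal sketch (remap_ring is a hypothetical helper, not driver code):

	/* Remap a power-of-two ring from curr_size to new_size slots,
	 * preserving each entry's offset from 'tail'. The caller must
	 * already have checked that the active entries fit.
	 */
	static void remap_ring(void **dst, void * const *src, u16 tail,
			       u32 curr_size, u32 new_size)
	{
		u32 i, n = min(curr_size, new_size);

		for (i = 0; i < n; i++)
			dst[(u16)(tail + i) & (new_size - 1)] =
				src[(u16)(tail + i) & (curr_size - 1)];
	}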
+ * ioat_check_space_lock - verify space and grab ring producer lock + * @ioat_chan: ioat channel (ring) to operate on + * @num_descs: allocation length */ -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) +int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) + __acquires(&ioat_chan->prep_lock) { - struct ioat_desc_sw *new; + bool retry; - if (!list_empty(&ioat->free_desc)) { - new = to_ioat_desc(ioat->free_desc.next); - list_del(&new->node); - } else { - /* try to get another desc */ - new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); - if (!new) { - dev_err(to_dev(&ioat->base), "alloc failed\n"); - return NULL; - } + retry: + spin_lock_bh(&ioat_chan->prep_lock); + /* never allow the last descriptor to be consumed; we need at + * least one free at all times to allow for on-the-fly ring + * resizing. + */ + if (likely(ioat_ring_space(ioat_chan) > num_descs)) { + dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat_chan->head, + ioat_chan->tail, ioat_chan->issued); + ioat_chan->produce = num_descs; + return 0; /* with ioat->prep_lock held */ + } + retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + /* is another cpu already trying to expand the ring? */ + if (retry) + goto retry; + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1); + clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + /* if we were able to expand the ring retry the allocation */ + if (retry) + goto retry; + + dev_dbg_ratelimited(to_dev(ioat_chan), + "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat_chan->head, + ioat_chan->tail, ioat_chan->issued); + + /* Make progress on reclaim. In the allocation-failure case we may + * be called with bottom halves disabled, so we need to trigger the + * timer event directly. + */ + if (time_is_before_jiffies(ioat_chan->timer.expires) + && timer_pending(&ioat_chan->timer)) { + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat_timer_event((unsigned long)ioat_chan); } - dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", - __func__, desc_id(new)); - prefetch(new->hw); - return new; + + return -ENOMEM; } -static struct dma_async_tx_descriptor * -ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) +static bool desc_has_ext(struct ioat_ring_ent *desc) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *desc; - size_t copy; - LIST_HEAD(chain); - dma_addr_t src = dma_src; - dma_addr_t dest = dma_dest; - size_t total_len = len; - struct ioat_dma_descriptor *hw = NULL; - int tx_cnt = 0; - - spin_lock_bh(&ioat->desc_lock); - desc = ioat1_dma_get_next_descriptor(ioat); - do { - if (!desc) - break; - - tx_cnt++; - copy = min_t(size_t, len, ioat->xfercap); + struct ioat_dma_descriptor *hw = desc->hw; - hw = desc->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dest; + if (hw->ctl_f.op == IOAT_OP_XOR || + hw->ctl_f.op == IOAT_OP_XOR_VAL) { + struct ioat_xor_descriptor *xor = desc->xor; - list_add_tail(&desc->node, &chain); + if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) + return true; + } else if (hw->ctl_f.op == IOAT_OP_PQ || + hw->ctl_f.op == IOAT_OP_PQ_VAL) { + struct ioat_pq_descriptor *pq = desc->pq; - len -= copy; - dest += copy; - src += copy; - if (len) { - struct ioat_desc_sw *next; - - async_tx_ack(&desc->txd); - next = ioat1_dma_get_next_descriptor(ioat); - hw->next = next ? 
next->txd.phys : 0; - dump_desc_dbg(ioat, desc); - desc = next; - } else - hw->next = 0; - } while (len); - - if (!desc) { - struct ioat_chan_common *chan = &ioat->base; - - dev_err(to_dev(chan), - "chan%d - get_next_desc failed\n", chan_num(chan)); - list_splice(&chain, &ioat->free_desc); - spin_unlock_bh(&ioat->desc_lock); - return NULL; + if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) + return true; } - spin_unlock_bh(&ioat->desc_lock); - desc->txd.flags = flags; - desc->len = total_len; - list_splice(&chain, &desc->tx_list); - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->tx_cnt = tx_cnt; - dump_desc_dbg(ioat, desc); - - return &desc->txd; + return false; } -static void ioat1_cleanup_event(unsigned long data) +static void +ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) { - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat1_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) + if (!sed) return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + + dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); + kmem_cache_free(ioat_sed_cache, sed); } -dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) +static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan) { - dma_addr_t phys_complete; + u64 phys_complete; u64 completion; - completion = *chan->completion; + completion = *ioat_chan->completion; phys_complete = ioat_chansts_to_addr(completion); - dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if (is_ioat_halted(completion)) { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", - chanerr); - - /* TODO do something to salvage the situation */ - } - return phys_complete; } -bool ioat_cleanup_preamble(struct ioat_chan_common *chan, - dma_addr_t *phys_complete) +static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, + u64 *phys_complete) { - *phys_complete = ioat_get_current_completion(chan); - if (*phys_complete == chan->last_completion) + *phys_complete = ioat_get_current_completion(ioat_chan); + if (*phys_complete == ioat_chan->last_completion) return false; - clear_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); return true; } -static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) +static void +desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) { - struct ioat_chan_common *chan = &ioat->base; - struct list_head *_desc, *n; - struct dma_async_tx_descriptor *tx; + struct ioat_dma_descriptor *hw = desc->hw; - dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n", - __func__, (unsigned long long) phys_complete); - list_for_each_safe(_desc, n, &ioat->used_desc) { - struct ioat_desc_sw *desc; + switch (hw->ctl_f.op) { + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + { + struct ioat_pq_descriptor *pq = desc->pq; - prefetch(n); - desc = list_entry(_desc, typeof(*desc), node); - tx = &desc->txd; - /* - * Incoming DMA requests may use multiple descriptors, - * due to exceeding xfercap, perhaps. If so, only the - * last one will have a cookie, and require unmapping. 
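ioat_get_current_completion() and ioat_cleanup_preamble() above turn the channel's DMA'd completion cookie into a simple progress test: if the retired-descriptor address matches last_completion, there is nothing new to reap. Reduced to its essentials as a sketch against the same structures (made_progress is a hypothetical name, not a driver function):

	/* Progress check driven by the 8-byte completion writeback area.
	 * The device DMAs CHANSTS into *c->completion; masking off the
	 * low status bits recovers the last retired descriptor address.
	 */
	static bool made_progress(struct ioatdma_chan *c, u64 *phys_complete)
	{
		u64 addr = ioat_chansts_to_addr(READ_ONCE(*c->completion));

		if (addr == c->last_completion)
			return false;		/* no new completions */
		*phys_complete = addr;
		return true;
	}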
- */ - dump_desc_dbg(ioat, desc); - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - ioat->active -= desc->hw->tx_cnt; - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } + /* check if there's error written */ + if (!pq->dwbes_f.wbes) + return; - if (tx->phys != phys_complete) { - /* - * a completed entry, but not the last, so clean - * up if the client is done with the descriptor - */ - if (async_tx_test_ack(tx)) - list_move_tail(&desc->node, &ioat->free_desc); - } else { - /* - * last used desc. Do not remove, so we can - * append from it. - */ - - /* if nothing else is pending, cancel the - * completion timeout - */ - if (n == &ioat->used_desc) { - dev_dbg(to_dev(chan), - "%s cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - } + /* need to set a chanerr var for checking to clear later */ - /* TODO check status bits? */ - break; - } - } + if (pq->dwbes_f.p_val_err) + *desc->result |= SUM_CHECK_P_RESULT; + + if (pq->dwbes_f.q_val_err) + *desc->result |= SUM_CHECK_Q_RESULT; - chan->last_completion = phys_complete; + return; + } + default: + return; + } } /** - * ioat1_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - * - * To prevent lock contention we defer cleanup when the locks are - * contended with a terminal timeout that forces cleanup and catches - * completion notification errors. + * __cleanup - reclaim used descriptors + * @ioat: channel (ring) to clean */ -static void ioat1_cleanup(struct ioat_dma_chan *ioat) +static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) { - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + bool seen_current = false; + int idx = ioat_chan->tail, i; + u16 active; - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } + dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); - if (!spin_trylock_bh(&ioat->desc_lock)) { - spin_unlock_bh(&chan->cleanup_lock); + /* + * At restart of the channel, the completion address and the + * channel status will be 0 due to starting a new chain. Since + * it's new chain and the first descriptor "fails", there is + * nothing to clean up. We do not want to reap the entire submitted + * chain due to this 0 address value and then BUG. 
+ */ + if (!phys_complete) return; - } - __cleanup(ioat, phys_complete); + active = ioat_ring_active(ioat_chan); + for (i = 0; i < active && !seen_current; i++) { + struct dma_async_tx_descriptor *tx; - spin_unlock_bh(&ioat->desc_lock); - spin_unlock_bh(&chan->cleanup_lock); -} - -static void ioat1_timer_event(unsigned long data) -{ - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; + smp_read_barrier_depends(); + prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); + desc = ioat_get_ring_ent(ioat_chan, idx + i); + dump_desc_dbg(ioat_chan, desc); - dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); + /* set err stat if we are using dwbes */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + desc_get_errstat(ioat_chan, desc); - spin_lock_bh(&chan->cleanup_lock); - if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { - struct ioat_desc_sw *desc; - - spin_lock_bh(&ioat->desc_lock); + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } - /* restart active descriptors */ - desc = to_ioat_desc(ioat->used_desc.prev); - ioat_set_chainaddr(ioat, desc->txd.phys); - ioat_start(chan); + if (tx->phys == phys_complete) + seen_current = true; - ioat->pending = 0; - set_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - spin_unlock_bh(&ioat->desc_lock); - } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { - dma_addr_t phys_complete; + /* skip extended descriptors */ + if (desc_has_ext(desc)) { + BUG_ON(i + 1 >= active); + i++; + } - spin_lock_bh(&ioat->desc_lock); - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) - ioat1_reset_channel(ioat); - else { - u64 status = ioat_chansts(chan); - - /* manually update the last completion address */ - if (ioat_chansts_to_addr(status) != 0) - *chan->completion = status; - - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat_free_sed(ioat_dma, desc->sed); + desc->sed = NULL; } - spin_unlock_bh(&ioat->desc_lock); } - spin_unlock_bh(&chan->cleanup_lock); -} - -enum dma_status -ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioat_chan_common *chan = to_chan_common(c); - struct ioatdma_device *device = chan->device; - enum dma_status ret; - ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; + /* finish all descriptor reads before incrementing tail */ + smp_mb(); + ioat_chan->tail = idx + i; + /* no active descs have written a completion? 
*/ + BUG_ON(active && !seen_current); + ioat_chan->last_completion = phys_complete; - device->cleanup_fn((unsigned long) c); + if (active - i == 0) { + dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", + __func__); + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + } - return dma_cookie_status(c, cookie, txstate); + /* 5 microsecond delay per pending descriptor */ + writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), + ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); } -static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) +static void ioat_cleanup(struct ioatdma_chan *ioat_chan) { - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *desc; - struct ioat_dma_descriptor *hw; + u64 phys_complete; - spin_lock_bh(&ioat->desc_lock); + spin_lock_bh(&ioat_chan->cleanup_lock); - desc = ioat1_dma_get_next_descriptor(ioat); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - if (!desc) { - dev_err(to_dev(chan), - "Unable to start null desc - get next desc failed\n"); - spin_unlock_bh(&ioat->desc_lock); - return; - } + if (is_ioat_halted(*ioat_chan->completion)) { + u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.compl_write = 1; - /* set size to non-zero value (channel returns error when size is 0) */ - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - async_tx_ack(&desc->txd); - hw->next = 0; - list_add_tail(&desc->node, &ioat->used_desc); - dump_desc_dbg(ioat, desc); + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + ioat_eh(ioat_chan); + } + } - ioat_set_chainaddr(ioat, desc->txd.phys); - ioat_start(chan); - spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); } -/* - * Perform a IOAT transaction to verify the HW works. - */ -#define IOAT_TEST_SIZE 2000 +void ioat_cleanup_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + + ioat_cleanup(ioat_chan); + if (!test_bit(IOAT_RUN, &ioat_chan->state)) + return; + writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); +} -static void ioat_dma_test_callback(void *dma_async_param) +static void ioat_restart_channel(struct ioatdma_chan *ioat_chan) { - struct completion *cmp = dma_async_param; + u64 phys_complete; + + ioat_quiesce(ioat_chan, 0); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - complete(cmp); + __ioat_restart_chan(ioat_chan); } -/** - * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
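The tail of __cleanup() above also rescales interrupt mitigation to the remaining backlog: five microseconds of holdoff per still-pending descriptor, clamped to the register maximum. The same write as a stand-alone helper (set_intr_delay is a hypothetical name; the registers are the driver's own):

	/* Interrupt coalescing proportional to backlog: a busier ring
	 * tolerates a longer delay before the next completion interrupt.
	 */
	static void set_intr_delay(struct ioatdma_chan *c, u16 pending)
	{
		u16 usecs = min_t(u16, 5 * pending, IOAT_INTRDELAY_MASK);

		writew(usecs, c->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
	}

So, for example, three undrained descriptors program a 15 us holdoff.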
- * @device: device to be tested - */ -int ioat_dma_self_test(struct ioatdma_device *device) +static void ioat_eh(struct ioatdma_chan *ioat_chan) { - int i; - u8 *src; - u8 *dest; - struct dma_device *dma = &device->common; - struct device *dev = &device->pdev->dev; - struct dma_chan *dma_chan; + struct pci_dev *pdev = to_pdev(ioat_chan); + struct ioat_dma_descriptor *hw; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - int err = 0; - struct completion cmp; - unsigned long tmo; - unsigned long flags; - - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!src) - return -ENOMEM; - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!dest) { - kfree(src); - return -ENOMEM; - } + u64 phys_complete; + struct ioat_ring_ent *desc; + u32 err_handled = 0; + u32 chanerr_int; + u32 chanerr; - /* Fill in src buffer */ - for (i = 0; i < IOAT_TEST_SIZE; i++) - src[i] = (u8)i; - - /* Start copy, using first DMA channel */ - dma_chan = container_of(dma->channels.next, struct dma_chan, - device_node); - if (dma->device_alloc_chan_resources(dma_chan) < 1) { - dev_err(dev, "selftest cannot allocate chan resource\n"); - err = -ENODEV; - goto out; - } + /* cleanup so tail points to descriptor that caused the error */ + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_src)) { - dev_err(dev, "mapping src buffer failed\n"); - goto free_resources; - } - dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dma_dest)) { - dev_err(dev, "mapping dest buffer failed\n"); - goto unmap_src; - } - flags = DMA_PREP_INTERRUPT; - tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, - IOAT_TEST_SIZE, flags); - if (!tx) { - dev_err(dev, "Self-test prep failed, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test setup failed, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } - dma->device_issue_pending(dma_chan); + dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", + __func__, chanerr, chanerr_int); - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); + hw = desc->hw; + dump_desc_dbg(ioat_chan, desc); - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_COMPLETE) { - dev_err(dev, "Self-test copy timed out, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } - if (memcmp(src, dest, IOAT_TEST_SIZE)) { - dev_err(dev, "Self-test copy failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; + switch (hw->ctl_f.op) { + case IOAT_OP_XOR_VAL: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + break; + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { + *desc->result |= SUM_CHECK_Q_RESULT; + err_handled |= IOAT_CHANERR_XOR_Q_ERR; + } + break; } -unmap_dma: - 
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); -unmap_src: - dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); -free_resources: - dma->device_free_chan_resources(dma_chan); -out: - kfree(src); - kfree(dest); - return err; -} - -static char ioat_interrupt_style[32] = "msix"; -module_param_string(ioat_interrupt_style, ioat_interrupt_style, - sizeof(ioat_interrupt_style), 0644); -MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), msi, intx"); - -/** - * ioat_dma_setup_interrupts - setup interrupt handler - * @device: ioat device - */ -int ioat_dma_setup_interrupts(struct ioatdma_device *device) -{ - struct ioat_chan_common *chan; - struct pci_dev *pdev = device->pdev; - struct device *dev = &pdev->dev; - struct msix_entry *msix; - int i, j, msixcnt; - int err = -EINVAL; - u8 intrctrl = 0; - - if (!strcmp(ioat_interrupt_style, "msix")) - goto msix; - if (!strcmp(ioat_interrupt_style, "msi")) - goto msi; - if (!strcmp(ioat_interrupt_style, "intx")) - goto intx; - dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); - goto err_no_irq; - -msix: - /* The number of MSI-X vectors should equal the number of channels */ - msixcnt = device->common.chancnt; - for (i = 0; i < msixcnt; i++) - device->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); - if (err) - goto msi; - - for (i = 0; i < msixcnt; i++) { - msix = &device->msix_entries[i]; - chan = ioat_chan_by_index(device, i); - err = devm_request_irq(dev, msix->vector, - ioat_dma_do_interrupt_msix, 0, - "ioat-msix", chan); - if (err) { - for (j = 0; j < i; j++) { - msix = &device->msix_entries[j]; - chan = ioat_chan_by_index(device, j); - devm_free_irq(dev, msix->vector, chan); + /* fault on unhandled error or spurious halt */ + if (chanerr ^ err_handled || chanerr == 0) { + dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", + __func__, chanerr, err_handled); + BUG(); + } else { /* cleanup the faulty descriptor */ + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } - goto msi; } } - intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; - device->irq_mode = IOAT_MSIX; - goto done; -msi: - err = pci_enable_msi(pdev); - if (err) - goto intx; + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); - err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, - "ioat-msi", device); - if (err) { - pci_disable_msi(pdev); - goto intx; - } - device->irq_mode = IOAT_MSI; - goto done; - -intx: - err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, - IRQF_SHARED, "ioat-intx", device); - if (err) - goto err_no_irq; - - device->irq_mode = IOAT_INTX; -done: - if (device->intr_quirk) - device->intr_quirk(device); - intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; - writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); - return 0; - -err_no_irq: - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - device->irq_mode = IOAT_NOIRQ; - dev_err(dev, "no usable interrupts\n"); - return err; -} -EXPORT_SYMBOL(ioat_dma_setup_interrupts); + /* mark faulting descriptor as complete */ + *ioat_chan->completion = desc->txd.phys; -static void ioat_disable_interrupts(struct ioatdma_device *device) -{ - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + 
spin_lock_bh(&ioat_chan->prep_lock); + ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); } -int ioat_probe(struct ioatdma_device *device) +static void check_active(struct ioatdma_chan *ioat_chan) { - int err = -ENODEV; - struct dma_device *dma = &device->common; - struct pci_dev *pdev = device->pdev; - struct device *dev = &pdev->dev; - - /* DMA coherent memory pool for DMA descriptor allocations */ - device->dma_pool = pci_pool_create("dma_desc_pool", pdev, - sizeof(struct ioat_dma_descriptor), - 64, 0); - if (!device->dma_pool) { - err = -ENOMEM; - goto err_dma_pool; - } - - device->completion_pool = pci_pool_create("completion_pool", pdev, - sizeof(u64), SMP_CACHE_BYTES, - SMP_CACHE_BYTES); - - if (!device->completion_pool) { - err = -ENOMEM; - goto err_completion_pool; + if (ioat_ring_active(ioat_chan)) { + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + return; } - device->enumerate_channels(device); - - dma_cap_set(DMA_MEMCPY, dma->cap_mask); - dma->dev = &pdev->dev; + if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { + /* if the ring is idle, empty, and oversized try to step + * down the size + */ + reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); - if (!dma->chancnt) { - dev_err(dev, "channel enumeration error\n"); - goto err_setup_interrupts; + /* keep shrinking until we get back to our minimum + * default size + */ + if (ioat_chan->alloc_order > ioat_get_alloc_order()) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } - err = ioat_dma_setup_interrupts(device); - if (err) - goto err_setup_interrupts; +} - err = device->self_test(device); - if (err) - goto err_self_test; +void ioat_timer_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + dma_addr_t phys_complete; + u64 status; - return 0; + status = ioat_chansts(ioat_chan); -err_self_test: - ioat_disable_interrupts(device); -err_setup_interrupts: - pci_pool_destroy(device->completion_pool); -err_completion_pool: - pci_pool_destroy(device->dma_pool); -err_dma_pool: - return err; -} + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if (is_ioat_halted(status)) { + u32 chanerr; -int ioat_register(struct ioatdma_device *device) -{ - int err = dma_async_device_register(&device->common); + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", + __func__, chanerr); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; + } - if (err) { - ioat_disable_interrupts(device); - pci_pool_destroy(device->completion_pool); - pci_pool_destroy(device->dma_pool); + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + spin_lock_bh(&ioat_chan->cleanup_lock); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { + spin_lock_bh(&ioat_chan->prep_lock); + ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + return; + } else { + set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); } - return err; -} -/* ioat1_intr_quirk - fix 
up dma ctrl register to enable / disable msi */ -static void ioat1_intr_quirk(struct ioatdma_device *device) -{ - struct pci_dev *pdev = device->pdev; - u32 dmactrl; - - pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); - if (pdev->msi_enabled) - dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - else - dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); + if (ioat_ring_active(ioat_chan)) + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + else { + spin_lock_bh(&ioat_chan->prep_lock); + check_active(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + } + spin_unlock_bh(&ioat_chan->cleanup_lock); } -static ssize_t ring_size_show(struct dma_chan *c, char *page) +enum dma_status +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + enum dma_status ret; - return sprintf(page, "%d\n", ioat->desccount); -} -static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; -static ssize_t ring_active_show(struct dma_chan *c, char *page) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); + ioat_cleanup(ioat_chan); - return sprintf(page, "%d\n", ioat->active); + return dma_cookie_status(c, cookie, txstate); } -static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); -static ssize_t cap_show(struct dma_chan *c, char *page) +static int ioat_irq_reinit(struct ioatdma_device *ioat_dma) { - struct dma_device *dma = c->device; + struct pci_dev *pdev = ioat_dma->pdev; + int irq = pdev->irq, i; - return sprintf(page, "copy%s%s%s%s%s\n", - dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", - dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", - dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", - dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", - dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? 
" intr" : ""); + if (!is_bwd_ioat(pdev)) + return 0; -} -struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); - -static ssize_t version_show(struct dma_chan *c, char *page) -{ - struct dma_device *dma = c->device; - struct ioatdma_device *device = to_ioatdma_device(dma); + switch (ioat_dma->irq_mode) { + case IOAT_MSIX: + for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { + struct msix_entry *msix = &ioat_dma->msix_entries[i]; + struct ioatdma_chan *ioat_chan; - return sprintf(page, "%d.%d\n", - device->version >> 4, device->version & 0xf); -} -struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); - -static struct attribute *ioat1_attrs[] = { - &ring_size_attr.attr, - &ring_active_attr.attr, - &ioat_cap_attr.attr, - &ioat_version_attr.attr, - NULL, -}; - -static ssize_t -ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) -{ - struct ioat_sysfs_entry *entry; - struct ioat_chan_common *chan; + ioat_chan = ioat_chan_by_index(ioat_dma, i); + devm_free_irq(&pdev->dev, msix->vector, ioat_chan); + } - entry = container_of(attr, struct ioat_sysfs_entry, attr); - chan = container_of(kobj, struct ioat_chan_common, kobj); + pci_disable_msix(pdev); + break; + case IOAT_MSI: + pci_disable_msi(pdev); + /* fall through */ + case IOAT_INTX: + devm_free_irq(&pdev->dev, irq, ioat_dma); + break; + default: + return 0; + } + ioat_dma->irq_mode = IOAT_NOIRQ; - if (!entry->show) - return -EIO; - return entry->show(&chan->common, page); + return ioat_dma_setup_interrupts(ioat_dma); } -const struct sysfs_ops ioat_sysfs_ops = { - .show = ioat_attr_show, -}; - -static struct kobj_type ioat1_ktype = { - .sysfs_ops = &ioat_sysfs_ops, - .default_attrs = ioat1_attrs, -}; - -void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) +int ioat_reset_hw(struct ioatdma_chan *ioat_chan) { - struct dma_device *dma = &device->common; - struct dma_chan *c; + /* throw away whatever the channel was doing and get it + * initialized, with ioat3 specific workarounds + */ + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct pci_dev *pdev = ioat_dma->pdev; + u32 chanerr; + u16 dev_id; + int err; + + ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); - list_for_each_entry(c, &dma->channels, device_node) { - struct ioat_chan_common *chan = to_chan_common(c); - struct kobject *parent = &c->dev->device.kobj; - int err; + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata"); + if (ioat_dma->version < IOAT_VER_3_3) { + /* clear any pending errors */ + err = pci_read_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); if (err) { - dev_warn(to_dev(chan), - "sysfs init error (%d), continuing...\n", err); - kobject_put(&chan->kobj); - set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state); + dev_err(&pdev->dev, + "channel error register unreachable\n"); + return err; } - } -} + pci_write_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); -void ioat_kobject_del(struct ioatdma_device *device) -{ - struct dma_device *dma = &device->common; - struct dma_chan *c; - - list_for_each_entry(c, &dma->channels, device_node) { - struct ioat_chan_common *chan = to_chan_common(c); - - if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) { - kobject_del(&chan->kobj); - kobject_put(&chan->kobj); + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, 
IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + pci_write_config_dword(pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + 0x10); } } -} -int ioat1_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - int err; + err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); + if (!err) + err = ioat_irq_reinit(ioat_dma); - device->intr_quirk = ioat1_intr_quirk; - device->enumerate_channels = ioat1_enumerate_channels; - device->self_test = ioat_dma_self_test; - device->timer_fn = ioat1_timer_event; - device->cleanup_fn = ioat1_cleanup_event; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; - dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; - dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_tx_status = ioat_dma_tx_status; - - err = ioat_probe(device); - if (err) - return err; - err = ioat_register(device); if (err) - return err; - ioat_kobject_add(device, &ioat1_ktype); - - if (dca) - device->dca = ioat_dca_init(pdev, device->reg_base); + dev_err(&pdev->dev, "Failed to reset: %d\n", err); return err; } - -void ioat_dma_remove(struct ioatdma_device *device) -{ - struct dma_device *dma = &device->common; - - ioat_disable_interrupts(device); - - ioat_kobject_del(device); - - dma_async_device_unregister(dma); - - pci_pool_destroy(device->dma_pool); - pci_pool_destroy(device->completion_pool); - - INIT_LIST_HEAD(&dma->channels); -} diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 30f5c7eed..1bc084986 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -18,26 +18,32 @@ #define IOATDMA_H #include -#include "hw.h" -#include "registers.h" #include #include #include #include -#include +#include +#include +#include "registers.h" +#include "hw.h" #define IOAT_DMA_VERSION "4.00" -#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 -#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) -#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) -#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) -#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) +#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev) +#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev) +#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev) + +#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) -#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) +/* ioat hardware assumes at least two sources for raid operations */ +#define src_cnt_to_sw(x) ((x) + 2) +#define src_cnt_to_hw(x) ((x) - 2) +#define ndest_to_sw(x) ((x) + 1) +#define ndest_to_hw(x) ((x) - 1) +#define src16_cnt_to_sw(x) ((x) + 9) +#define src16_cnt_to_hw(x) ((x) - 9) /* * workaround for IOAT ver.3.0 null descriptor issue @@ -57,19 +63,15 @@ enum ioat_irq_mode { * @pdev: PCI-Express device * @reg_base: MMIO register space base address * @dma_pool: for allocating DMA descriptors - * @common: embedded struct dma_device + * @completion_pool: DMA buffers for completion ops + * @sed_hw_pool: DMA super descriptor pools + * @dma_dev: embedded struct dma_device * @version: version of ioatdma device * @msix_entries: irq handlers * @idx: per channel data * @dca: direct cache access context - * 
@intr_quirk: interrupt setup quirk (for ioat_v1 devices) - * @enumerate_channels: hw version specific channel enumeration - * @reset_hw: hw version specific channel (re)initialization - * @cleanup_fn: select between the v2 and v3 cleanup routines - * @timer_fn: select between the v2 and v3 timer watchdog routines - * @self_test: hardware version specific self test for each supported op type - * - * Note: the v3 cleanup routine supports raid operations + * @irq_mode: interrupt mode (INTX, MSI, MSIX) + * @cap: read DMA capabilities register */ struct ioatdma_device { struct pci_dev *pdev; @@ -78,28 +80,21 @@ struct ioatdma_device { struct pci_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; - struct dma_device common; + struct dma_device dma_dev; u8 version; struct msix_entry msix_entries[4]; - struct ioat_chan_common *idx[4]; + struct ioatdma_chan *idx[4]; struct dca_provider *dca; enum ioat_irq_mode irq_mode; u32 cap; - void (*intr_quirk)(struct ioatdma_device *device); - int (*enumerate_channels)(struct ioatdma_device *device); - int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_fn)(unsigned long data); - void (*timer_fn)(unsigned long data); - int (*self_test)(struct ioatdma_device *device); }; -struct ioat_chan_common { - struct dma_chan common; +struct ioatdma_chan { + struct dma_chan dma_chan; void __iomem *reg_base; dma_addr_t last_completion; spinlock_t cleanup_lock; unsigned long state; - #define IOAT_COMPLETION_PENDING 0 #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 @@ -110,11 +105,32 @@ struct ioat_chan_common { #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) - struct ioatdma_device *device; + struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; u64 *completion; struct tasklet_struct cleanup_task; struct kobject kobj; + +/* ioat v2 / v3 channel attributes + * @xfercap_log; log2 of channel max transfer length (for fast division) + * @head: allocated index + * @issued: hardware notification point + * @tail: cleanup index + * @dmacount: identical to 'head' except for occasionally resetting to zero + * @alloc_order: log2 of the number of allocated descriptors + * @produce: number of descriptors to produce at submit time + * @ring: software ring buffer implementation of hardware ring + * @prep_lock: serializes descriptor preparation (producers) + */ + size_t xfercap_log; + u16 head; + u16 issued; + u16 tail; + u16 dmacount; + u16 alloc_order; + u16 produce; + struct ioat_ring_ent **ring; + spinlock_t prep_lock; }; struct ioat_sysfs_entry { @@ -122,29 +138,12 @@ struct ioat_sysfs_entry { ssize_t (*show)(struct dma_chan *, char *); }; -/** - * struct ioat_dma_chan - internal representation of a DMA channel - */ -struct ioat_dma_chan { - struct ioat_chan_common base; - - size_t xfercap; /* XFERCAP register value expanded out */ - - spinlock_t desc_lock; - struct list_head free_desc; - struct list_head used_desc; - - int pending; - u16 desccount; - u16 active; -}; - /** * struct ioat_sed_ent - wrapper around super extended hardware descriptor * @hw: hardware SED - * @sed_dma: dma address for the SED - * @list: list member + * @dma: dma address for the SED * @parent: point to the dma descriptor that's the parent + * @hw_pool: descriptor pool index */ struct ioat_sed_ent { struct ioat_sed_raw_descriptor *hw; @@ -153,39 +152,57 @@ struct ioat_sed_ent { unsigned int hw_pool; }; -static inline struct 
ioat_chan_common *to_chan_common(struct dma_chan *c) -{ - return container_of(c, struct ioat_chan_common, common); -} - -static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) -{ - struct ioat_chan_common *chan = to_chan_common(c); - - return container_of(chan, struct ioat_dma_chan, base); -} - -/* wrapper around hardware descriptor format + additional software fields */ - /** - * struct ioat_desc_sw - wrapper around hardware descriptor + * struct ioat_ring_ent - wrapper around hardware descriptor * @hw: hardware DMA descriptor (for memcpy) - * @node: this descriptor will either be on the free list, - * or attached to a transaction list (tx_list) + * @xor: hardware xor descriptor + * @xor_ex: hardware xor extension descriptor + * @pq: hardware pq descriptor + * @pq_ex: hardware pq extension descriptor + * @pqu: hardware pq update descriptor + * @raw: hardware raw (un-typed) descriptor * @txd: the generic software descriptor for all engines + * @len: total transaction length for unmap + * @result: asynchronous result of validate operations * @id: identifier for debug + * @sed: pointer to super extended descriptor sw desc */ -struct ioat_desc_sw { - struct ioat_dma_descriptor *hw; - struct list_head node; + +struct ioat_ring_ent { + union { + struct ioat_dma_descriptor *hw; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex; + struct ioat_pq_update_descriptor *pqu; + struct ioat_raw_descriptor *raw; + }; size_t len; - struct list_head tx_list; struct dma_async_tx_descriptor txd; + enum sum_check_flags *result; #ifdef DEBUG int id; #endif + struct ioat_sed_ent *sed; }; +extern const struct sysfs_ops ioat_sysfs_ops; +extern struct ioat_sysfs_entry ioat_version_attr; +extern struct ioat_sysfs_entry ioat_cap_attr; +extern int ioat_pending_level; +extern int ioat_ring_alloc_order; +extern struct kobj_type ioat_ktype; +extern struct kmem_cache *ioat_cache; +extern int ioat_ring_max_alloc_order; +extern struct kmem_cache *ioat_sed_cache; + +static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) +{ + return container_of(c, struct ioatdma_chan, dma_chan); +} + +/* wrapper around hardware descriptor format + additional software fields */ #ifdef DEBUG #define set_desc_id(desc, i) ((desc)->id = (i)) #define desc_id(desc) ((desc)->id) @@ -195,10 +212,10 @@ struct ioat_desc_sw { #endif static inline void -__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, +__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw, struct dma_async_tx_descriptor *tx, int id) { - struct device *dev = to_dev(chan); + struct device *dev = to_dev(ioat_chan); dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, @@ -208,25 +225,25 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, } #define dump_desc_dbg(c, d) \ - ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) + ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) -static inline struct ioat_chan_common * -ioat_chan_by_index(struct ioatdma_device *device, int index) +static inline struct ioatdma_chan * +ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index) { - return device->idx[index]; + return ioat_dma->idx[index]; } -static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) +static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) { - u8 ver = 
chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u64 status; u32 status_lo; /* We need to read the low address first as this causes the * chipset to latch the upper bits for the subsequent read */ - status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); - status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); + status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); + status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); status <<= 32; status |= status_lo; @@ -235,16 +252,16 @@ static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) #if BITS_PER_LONG == 64 -static inline u64 ioat_chansts(struct ioat_chan_common *chan) +static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u64 status; /* With IOAT v3.3 the status register is 64bit. */ if (ver >= IOAT_VER_3_3) - status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); + status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); else - status = ioat_chansts_32(chan); + status = ioat_chansts_32(ioat_chan); return status; } @@ -253,56 +270,41 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan) #define ioat_chansts ioat_chansts_32 #endif -static inline void ioat_start(struct ioat_chan_common *chan) -{ - u8 ver = chan->device->version; - - writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); -} - static inline u64 ioat_chansts_to_addr(u64 status) { return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; } -static inline u32 ioat_chanerr(struct ioat_chan_common *chan) +static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan) { - return readl(chan->reg_base + IOAT_CHANERR_OFFSET); + return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } -static inline void ioat_suspend(struct ioat_chan_common *chan) +static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; - writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + writeb(IOAT_CHANCMD_SUSPEND, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } -static inline void ioat_reset(struct ioat_chan_common *chan) +static inline void ioat_reset(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; - writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + writeb(IOAT_CHANCMD_RESET, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } -static inline bool ioat_reset_pending(struct ioat_chan_common *chan) +static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u8 cmd; - cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; } -static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) -{ - struct ioat_chan_common *chan = &ioat->base; - - writel(addr & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(addr >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); -} - static inline bool is_ioat_active(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); @@ -329,24 +331,111 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -int ioat_probe(struct ioatdma_device *device); -int ioat_register(struct ioatdma_device *device); -int 
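The comment in ioat_chansts_32() above encodes a hardware contract: reading CHANSTS_LOW makes the chipset latch the high half, so the two 32-bit reads observe one consistent 64-bit value. The pattern in isolation, as a sketch (read_chansts64 is a hypothetical helper; the offsets are the driver's):

	/* Consistent 64-bit read via two 32-bit MMIO accesses.
	 * LOW must be read first -- it latches HIGH for the second read.
	 */
	static u64 read_chansts64(void __iomem *reg_base, u8 ver)
	{
		u32 lo = readl(reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
		u64 hi = readl(reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));

		return (hi << 32) | lo;
	}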
ioat1_dma_probe(struct ioatdma_device *dev, int dca); -int ioat_dma_self_test(struct ioatdma_device *device); -void ioat_dma_remove(struct ioatdma_device *device); +#define IOAT_MAX_ORDER 16 +#define ioat_get_alloc_order() \ + (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) +#define ioat_get_max_alloc_order() \ + (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) + +static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan) +{ + return 1 << ioat_chan->alloc_order; +} + +/* count of descriptors in flight with the engine */ +static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan) +{ + return CIRC_CNT(ioat_chan->head, ioat_chan->tail, + ioat_ring_size(ioat_chan)); +} + +/* count of descriptors pending submission to hardware */ +static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan) +{ + return CIRC_CNT(ioat_chan->head, ioat_chan->issued, + ioat_ring_size(ioat_chan)); +} + +static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan) +{ + return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan); +} + +static inline u16 +ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len) +{ + u16 num_descs = len >> ioat_chan->xfercap_log; + + num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1)); + return num_descs; +} + +static inline struct ioat_ring_ent * +ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx) +{ + return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)]; +} + +static inline void +ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) +{ + writel(addr & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); +} + +/* IOAT Prep functions */ +struct dma_async_tx_descriptor * +ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags); + +/* IOAT Operation functions */ +irqreturn_t ioat_dma_do_interrupt(int irq, void *data); +irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); +struct ioat_ring_ent ** +ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); +void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); +void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan); +int ioat_reset_hw(struct ioatdma_chan *ioat_chan); +enum dma_status 
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate); +void ioat_cleanup_event(unsigned long data); +void ioat_timer_event(unsigned long data); +int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); +void ioat_issue_pending(struct dma_chan *chan); +void ioat_timer_event(unsigned long data); + +/* IOAT Init functions */ +bool is_bwd_ioat(struct pci_dev *pdev); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); -dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan); -void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx); -enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate); -bool ioat_cleanup_preamble(struct ioat_chan_common *chan, - dma_addr_t *phys_complete); -void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); -void ioat_kobject_del(struct ioatdma_device *device); -int ioat_dma_setup_interrupts(struct ioatdma_device *device); -void ioat_stop(struct ioat_chan_common *chan); -extern const struct sysfs_ops ioat_sysfs_ops; -extern struct ioat_sysfs_entry ioat_version_attr; -extern struct ioat_sysfs_entry ioat_cap_attr; +void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); +void ioat_kobject_del(struct ioatdma_device *ioat_dma); +int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); +void ioat_stop(struct ioatdma_chan *ioat_chan); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c deleted file mode 100644 index 69c7dfcad..000000000 --- a/drivers/dma/ioat/dma_v2.c +++ /dev/null @@ -1,916 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2004 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -/* - * This driver supports an Intel I/OAT DMA engine (versions >= 2), which - * does asynchronous data movement and checksumming operations. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "dma.h" -#include "dma_v2.h" -#include "registers.h" -#include "hw.h" - -#include "../dmaengine.h" - -int ioat_ring_alloc_order = 8; -module_param(ioat_ring_alloc_order, int, 0644); -MODULE_PARM_DESC(ioat_ring_alloc_order, - "ioat2+: allocate 2^n descriptors per channel" - " (default: 8 max: 16)"); -static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; -module_param(ioat_ring_max_alloc_order, int, 0644); -MODULE_PARM_DESC(ioat_ring_max_alloc_order, - "ioat2+: upper limit for ring size (default: 16)"); - -void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - ioat->dmacount += ioat2_ring_pending(ioat); - ioat->issued = ioat->head; - writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - dev_dbg(to_dev(chan), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); -} - -void ioat2_issue_pending(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - if (ioat2_ring_pending(ioat)) { - spin_lock_bh(&ioat->prep_lock); - __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->prep_lock); - } -} - -/** - * ioat2_update_pending - log pending descriptors - * @ioat: ioat2+ channel - * - * Check if the number of unsubmitted descriptors has exceeded the - * watermark. Called with prep_lock held - */ -static void ioat2_update_pending(struct ioat2_dma_chan *ioat) -{ - if (ioat2_ring_pending(ioat) > ioat_pending_level) - __ioat2_issue_pending(ioat); -} - -static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) -{ - struct ioat_ring_ent *desc; - struct ioat_dma_descriptor *hw; - - if (ioat2_ring_space(ioat) < 1) { - dev_err(to_dev(&ioat->base), - "Unable to start null desc - ring full\n"); - return; - } - - dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - desc = ioat2_get_ring_ent(ioat, ioat->head); - - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.compl_write = 1; - /* set size to non-zero value (channel returns error when size is 0) */ - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - async_tx_ack(&desc->txd); - ioat2_set_chainaddr(ioat, desc->txd.phys); - dump_desc_dbg(ioat, desc); - wmb(); - ioat->head += 1; - __ioat2_issue_pending(ioat); -} - -static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) -{ - spin_lock_bh(&ioat->prep_lock); - __ioat2_start_null_desc(ioat); - spin_unlock_bh(&ioat->prep_lock); -} - -static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) -{ - struct ioat_chan_common *chan = &ioat->base; - struct dma_async_tx_descriptor *tx; - struct ioat_ring_ent *desc; - bool seen_current = false; - u16 active; - int idx = ioat->tail, i; - - dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - - active = ioat2_ring_active(ioat); - for (i = 0; i < active && !seen_current; i++) { - smp_read_barrier_depends(); - prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); - desc = ioat2_get_ring_ent(ioat, idx + i); - tx = &desc->txd; - dump_desc_dbg(ioat, desc); - if (tx->cookie) { - dma_descriptor_unmap(tx); - dma_cookie_complete(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys == phys_complete) - seen_current = true; - } - 
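/*
 * [Editor's annotation -- not part of the original file] Ring bookkeeping
 * used by the cleanup loop above: the ring holds 2^alloc_order entries and
 * the three cursors satisfy tail <= issued <= head modulo the ring size.
 *
 *   active  = CIRC_CNT(head, tail,   size)   descriptors the engine may own
 *   pending = CIRC_CNT(head, issued, size)   prepared but not yet issued
 *
 * For a power-of-two size, CIRC_CNT(h, t, size) is (h - t) & (size - 1):
 * with head = 5, issued = 3, tail = 1 and size = 8, active is 4 and
 * pending is 2. The loop walks the active window from tail toward head and
 * stops once it passes the descriptor whose physical address matches the
 * completion writeback.
 */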
smp_mb(); /* finish all descriptor reads before incrementing tail */ - ioat->tail = idx + i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ - - chan->last_completion = phys_complete; - if (active - i == 0) { - dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } -} - -/** - * ioat2_cleanup - clean finished descriptors (advance tail pointer) - * @chan: ioat channel to be cleaned up - */ -static void ioat2_cleanup(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - spin_unlock_bh(&chan->cleanup_lock); -} - -void ioat2_cleanup_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat2_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) - return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); -} - -void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - /* set the tail to be re-issued */ - ioat->issued = ioat->tail; - ioat->dmacount = 0; - set_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - dev_dbg(to_dev(chan), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); - - if (ioat2_ring_pending(ioat)) { - struct ioat_ring_ent *desc; - - desc = ioat2_get_ring_ent(ioat, ioat->tail); - ioat2_set_chainaddr(ioat, desc->txd.phys); - __ioat2_issue_pending(ioat); - } else - __ioat2_start_null_desc(ioat); -} - -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) -{ - unsigned long end = jiffies + tmo; - int err = 0; - u32 status; - - status = ioat_chansts(chan); - if (is_ioat_active(status) || is_ioat_idle(status)) - ioat_suspend(chan); - while (is_ioat_active(status) || is_ioat_idle(status)) { - if (tmo && time_after(jiffies, end)) { - err = -ETIMEDOUT; - break; - } - status = ioat_chansts(chan); - cpu_relax(); - } - - return err; -} - -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) -{ - unsigned long end = jiffies + tmo; - int err = 0; - - ioat_reset(chan); - while (ioat_reset_pending(chan)) { - if (end && time_after(jiffies, end)) { - err = -ETIMEDOUT; - break; - } - cpu_relax(); - } - - return err; -} - -static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - ioat2_quiesce(chan, 0); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - __ioat2_restart_chan(ioat); -} - -static void check_active(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - if (ioat2_ring_active(ioat)) { - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - return; - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - else if (ioat->alloc_order > ioat_get_alloc_order()) { - /* if the ring is idle, empty, and oversized try to step - * down the size - */ - reshape_ring(ioat, ioat->alloc_order - 1); - - /* keep shrinking until we get back to our minimum - * default size - */ - if (ioat->alloc_order > ioat_get_alloc_order()) - mod_timer(&chan->timer, 
jiffies + IDLE_TIMEOUT); - } - -} - -void ioat2_timer_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - u64 status; - - status = ioat_chansts(chan); - - /* when halted due to errors check for channel - * programming errors before advancing the completion state - */ - if (is_ioat_halted(status)) { - u32 chanerr; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "%s: Channel halted (%x)\n", - __func__, chanerr); - if (test_bit(IOAT_RUN, &chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; - } - - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { - spin_lock_bh(&ioat->prep_lock); - ioat2_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - return; - } else { - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - - if (ioat2_ring_active(ioat)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat->prep_lock); - check_active(ioat); - spin_unlock_bh(&ioat->prep_lock); - } - spin_unlock_bh(&chan->cleanup_lock); -} - -static int ioat2_reset_hw(struct ioat_chan_common *chan) -{ - /* throw away whatever the channel was doing and get it initialized */ - u32 chanerr; - - ioat2_quiesce(chan, msecs_to_jiffies(100)); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); -} - -/** - * ioat2_enumerate_channels - find and initialize the device's channels - * @device: the device to be enumerated - */ -int ioat2_enumerate_channels(struct ioatdma_device *device) -{ - struct ioat2_dma_chan *ioat; - struct device *dev = &device->pdev->dev; - struct dma_device *dma = &device->common; - u8 xfercap_log; - int i; - - INIT_LIST_HEAD(&dma->channels); - dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); - dma->chancnt &= 0x1f; /* bits [4:0] valid */ - if (dma->chancnt > ARRAY_SIZE(device->idx)) { - dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", - dma->chancnt, ARRAY_SIZE(device->idx)); - dma->chancnt = ARRAY_SIZE(device->idx); - } - xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); - xfercap_log &= 0x1f; /* bits [4:0] valid */ - if (xfercap_log == 0) - return 0; - dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); - - /* FIXME which i/oat version is i7300? 
*/ -#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) - dma->chancnt--; -#endif - for (i = 0; i < dma->chancnt; i++) { - ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); - if (!ioat) - break; - - ioat_init_channel(device, &ioat->base, i); - ioat->xfercap_log = xfercap_log; - spin_lock_init(&ioat->prep_lock); - if (device->reset_hw(&ioat->base)) { - i = 0; - break; - } - } - dma->chancnt = i; - return i; -} - -static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) -{ - struct dma_chan *c = tx->chan; - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - dma_cookie_t cookie; - - cookie = dma_cookie_assign(tx); - dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); - - if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - /* make descriptor updates visible before advancing ioat->head, - * this is purposefully not smp_wmb() since we are also - * publishing the descriptor updates to a dma device - */ - wmb(); - - ioat->head += ioat->produce; - - ioat2_update_pending(ioat); - spin_unlock_bh(&ioat->prep_lock); - - return cookie; -} - -static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) -{ - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *desc; - struct ioatdma_device *dma; - dma_addr_t phys; - - dma = to_ioatdma_device(chan->device); - hw = pci_pool_alloc(dma->dma_pool, flags, &phys); - if (!hw) - return NULL; - memset(hw, 0, sizeof(*hw)); - - desc = kmem_cache_zalloc(ioat2_cache, flags); - if (!desc) { - pci_pool_free(dma->dma_pool, hw, phys); - return NULL; - } - - dma_async_tx_descriptor_init(&desc->txd, chan); - desc->txd.tx_submit = ioat2_tx_submit_unlock; - desc->hw = hw; - desc->txd.phys = phys; - return desc; -} - -static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) -{ - struct ioatdma_device *dma; - - dma = to_ioatdma_device(chan->device); - pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); - kmem_cache_free(ioat2_cache, desc); -} - -static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) -{ - struct ioat_ring_ent **ring; - int descs = 1 << order; - int i; - - if (order > ioat_get_max_alloc_order()) - return NULL; - - /* allocate the array to hold the software ring */ - ring = kcalloc(descs, sizeof(*ring), flags); - if (!ring) - return NULL; - for (i = 0; i < descs; i++) { - ring[i] = ioat2_alloc_ring_ent(c, flags); - if (!ring[i]) { - while (i--) - ioat2_free_ring_ent(ring[i], c); - kfree(ring); - return NULL; - } - set_desc_id(ring[i], i); - } - - /* link descs */ - for (i = 0; i < descs-1; i++) { - struct ioat_ring_ent *next = ring[i+1]; - struct ioat_dma_descriptor *hw = ring[i]->hw; - - hw->next = next->txd.phys; - } - ring[i]->hw->next = ring[0]->txd.phys; - - return ring; -} - -void ioat2_free_chan_resources(struct dma_chan *c); - -/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring - * @chan: channel to be initialized - */ -int ioat2_alloc_chan_resources(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_ring_ent **ring; - u64 status; - int order; - int i = 0; - - /* have we already been set up? 
*/ - if (ioat->ring) - return 1 << ioat->alloc_order; - - /* Setup register to interrupt and write completion status on error */ - writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); - - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, &chan->completion_dma); - if (!chan->completion) - return -ENOMEM; - - memset(chan->completion, 0, sizeof(*chan->completion)); - writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_dma) >> 32, - chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - order = ioat_get_alloc_order(); - ring = ioat2_alloc_ring(c, order, GFP_KERNEL); - if (!ring) - return -ENOMEM; - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - ioat->ring = ring; - ioat->head = 0; - ioat->issued = 0; - ioat->tail = 0; - ioat->alloc_order = order; - set_bit(IOAT_RUN, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - ioat2_start_null_desc(ioat); - - /* check that we got off the ground */ - do { - udelay(1); - status = ioat_chansts(chan); - } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); - - if (is_ioat_active(status) || is_ioat_idle(status)) { - return 1 << ioat->alloc_order; - } else { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - - dev_WARN(to_dev(chan), - "failed to start channel chanerr: %#x\n", chanerr); - ioat2_free_chan_resources(c); - return -EFAULT; - } -} - -bool reshape_ring(struct ioat2_dma_chan *ioat, int order) -{ - /* reshape differs from normal ring allocation in that we want - * to allocate a new software ring while only - * extending/truncating the hardware ring - */ - struct ioat_chan_common *chan = &ioat->base; - struct dma_chan *c = &chan->common; - const u32 curr_size = ioat2_ring_size(ioat); - const u16 active = ioat2_ring_active(ioat); - const u32 new_size = 1 << order; - struct ioat_ring_ent **ring; - u16 i; - - if (order > ioat_get_max_alloc_order()) - return false; - - /* double check that we have at least 1 free descriptor */ - if (active == curr_size) - return false; - - /* when shrinking, verify that we can hold the current active - * set in the new ring - */ - if (active >= new_size) - return false; - - /* allocate the array to hold the software ring */ - ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); - if (!ring) - return false; - - /* allocate/trim descriptors as needed */ - if (new_size > curr_size) { - /* copy current descriptors to the new ring */ - for (i = 0; i < curr_size; i++) { - u16 curr_idx = (ioat->tail+i) & (curr_size-1); - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat->ring[curr_idx]; - set_desc_id(ring[new_idx], new_idx); - } - - /* add new descriptors to the ring */ - for (i = curr_size; i < new_size; i++) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); - if (!ring[new_idx]) { - while (i--) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ioat2_free_ring_ent(ring[new_idx], c); - } - kfree(ring); - return false; - } - set_desc_id(ring[new_idx], new_idx); - } - - /* hw link new descriptors */ - for (i = curr_size-1; i < new_size; i++) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; - struct ioat_dma_descriptor *hw = ring[new_idx]->hw; - - hw->next = 
next->txd.phys; - } - } else { - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *next; - - /* copy current descriptors to the new ring, dropping the - * removed descriptors - */ - for (i = 0; i < new_size; i++) { - u16 curr_idx = (ioat->tail+i) & (curr_size-1); - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat->ring[curr_idx]; - set_desc_id(ring[new_idx], new_idx); - } - - /* free deleted descriptors */ - for (i = new_size; i < curr_size; i++) { - struct ioat_ring_ent *ent; - - ent = ioat2_get_ring_ent(ioat, ioat->tail+i); - ioat2_free_ring_ent(ent, c); - } - - /* fix up hardware ring */ - hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; - next = ring[(ioat->tail+new_size) & (new_size-1)]; - hw->next = next->txd.phys; - } - - dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", - __func__, new_size); - - kfree(ioat->ring); - ioat->ring = ring; - ioat->alloc_order = order; - - return true; -} - -/** - * ioat2_check_space_lock - verify space and grab ring producer lock - * @ioat: ioat2,3 channel (ring) to operate on - * @num_descs: allocation length - */ -int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs) -{ - struct ioat_chan_common *chan = &ioat->base; - bool retry; - - retry: - spin_lock_bh(&ioat->prep_lock); - /* never allow the last descriptor to be consumed, we need at - * least one free at all times to allow for on-the-fly ring - * resizing. - */ - if (likely(ioat2_ring_space(ioat) > num_descs)) { - dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); - ioat->produce = num_descs; - return 0; /* with ioat->prep_lock held */ - } - retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - - /* is another cpu already trying to expand the ring? */ - if (retry) - goto retry; - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - retry = reshape_ring(ioat, ioat->alloc_order + 1); - clear_bit(IOAT_RESHAPE_PENDING, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - /* if we were able to expand the ring retry the allocation */ - if (retry) - goto retry; - - if (printk_ratelimit()) - dev_dbg(to_dev(chan), "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); - - /* progress reclaim in the allocation failure case we may be - * called under bh_disabled so we need to trigger the timer - * event directly - */ - if (time_is_before_jiffies(chan->timer.expires) - && timer_pending(&chan->timer)) { - struct ioatdma_device *device = chan->device; - - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - device->timer_fn((unsigned long) &chan->common); - } - - return -ENOMEM; -} - -struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *desc; - dma_addr_t dst = dma_dest; - dma_addr_t src = dma_src; - size_t total_len = len; - int num_descs, idx, i; - - num_descs = ioat2_xferlen_to_descs(ioat, len); - if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - hw = desc->hw; - - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - - len -= copy; - dst += copy; - src += copy; - dump_desc_dbg(ioat, desc); - } while (++i < num_descs); - - desc->txd.flags = flags; - desc->len = total_len; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - hw->ctl_f.compl_write = 1; - dump_desc_dbg(ioat, desc); - /* we leave the channel locked to ensure in order submission */ - - return &desc->txd; -} - -/** - * ioat2_free_chan_resources - release all the descriptors - * @chan: the channel to be cleaned - */ -void ioat2_free_chan_resources(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - const u16 total_descs = 1 << ioat->alloc_order; - int descs; - int i; - - /* Before freeing channel resources first check - * if they have been previously allocated for this channel. 
- */ - if (!ioat->ring) - return; - - ioat_stop(chan); - device->reset_hw(chan); - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - descs = ioat2_ring_space(ioat); - dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); - for (i = 0; i < descs; i++) { - desc = ioat2_get_ring_ent(ioat, ioat->head + i); - ioat2_free_ring_ent(desc, c); - } - - if (descs < total_descs) - dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", - total_descs - descs); - - for (i = 0; i < total_descs - descs; i++) { - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); - dump_desc_dbg(ioat, desc); - ioat2_free_ring_ent(desc, c); - } - - kfree(ioat->ring); - ioat->ring = NULL; - ioat->alloc_order = 0; - pci_pool_free(device->completion_pool, chan->completion, - chan->completion_dma); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - chan->last_completion = 0; - chan->completion_dma = 0; - ioat->dmacount = 0; -} - -static ssize_t ring_size_show(struct dma_chan *c, char *page) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1); -} -static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); - -static ssize_t ring_active_show(struct dma_chan *c, char *page) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - /* ...taken outside the lock, no need to be precise */ - return sprintf(page, "%d\n", ioat2_ring_active(ioat)); -} -static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); - -static struct attribute *ioat2_attrs[] = { - &ring_size_attr.attr, - &ring_active_attr.attr, - &ioat_cap_attr.attr, - &ioat_version_attr.attr, - NULL, -}; - -struct kobj_type ioat2_ktype = { - .sysfs_ops = &ioat_sysfs_ops, - .default_attrs = ioat2_attrs, -}; - -int ioat2_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - - device->enumerate_channels = ioat2_enumerate_channels; - device->reset_hw = ioat2_reset_hw; - device->cleanup_fn = ioat2_cleanup_event; - device->timer_fn = ioat2_timer_event; - device->self_test = ioat_dma_self_test; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; - dma->device_issue_pending = ioat2_issue_pending; - dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; - dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_tx_status = ioat_dma_tx_status; - - err = ioat_probe(device); - if (err) - return err; - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - - ioat_kobject_add(device, &ioat2_ktype); - - if (dca) - device->dca = ioat2_dca_init(pdev, device->reg_base); - - return err; -} diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h deleted file mode 100644 index bf24ebe87..000000000 --- a/drivers/dma/ioat/dma_v2.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ -#ifndef IOATDMA_V2_H -#define IOATDMA_V2_H - -#include -#include -#include "dma.h" -#include "hw.h" - - -extern int ioat_pending_level; -extern int ioat_ring_alloc_order; - -/* - * workaround for IOAT ver.3.0 null descriptor issue - * (channel returns error when size is 0) - */ -#define NULL_DESC_BUFFER_SIZE 1 - -#define IOAT_MAX_ORDER 16 -#define ioat_get_alloc_order() \ - (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) -#define ioat_get_max_alloc_order() \ - (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) - -/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes - * @base: common ioat channel parameters - * @xfercap_log; log2 of channel max transfer length (for fast division) - * @head: allocated index - * @issued: hardware notification point - * @tail: cleanup index - * @dmacount: identical to 'head' except for occasionally resetting to zero - * @alloc_order: log2 of the number of allocated descriptors - * @produce: number of descriptors to produce at submit time - * @ring: software ring buffer implementation of hardware ring - * @prep_lock: serializes descriptor preparation (producers) - */ -struct ioat2_dma_chan { - struct ioat_chan_common base; - size_t xfercap_log; - u16 head; - u16 issued; - u16 tail; - u16 dmacount; - u16 alloc_order; - u16 produce; - struct ioat_ring_ent **ring; - spinlock_t prep_lock; -}; - -static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) -{ - struct ioat_chan_common *chan = to_chan_common(c); - - return container_of(chan, struct ioat2_dma_chan, base); -} - -static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat) -{ - return 1 << ioat->alloc_order; -} - -/* count of descriptors in flight with the engine */ -static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) -{ - return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat)); -} - -/* count of descriptors pending submission to hardware */ -static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) -{ - return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); -} - -static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat) -{ - return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); -} - -static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) -{ - u16 num_descs = len >> ioat->xfercap_log; - - num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); - return num_descs; -} - -/** - * struct ioat_ring_ent - wrapper around hardware descriptor - * @hw: hardware DMA descriptor (for memcpy) - * @fill: hardware fill descriptor - * @xor: hardware xor descriptor - * @xor_ex: hardware xor extension descriptor - * @pq: hardware pq descriptor - * @pq_ex: hardware pq extension descriptor - * @pqu: hardware pq update descriptor - * @raw: hardware raw (un-typed) descriptor - * @txd: the generic software descriptor for all engines - * @len: total transaction length for unmap - * @result: asynchronous result of validate operations - * @id: identifier for debug - */ - -struct ioat_ring_ent { - union { - struct ioat_dma_descriptor *hw; - struct ioat_xor_descriptor *xor; - struct ioat_xor_ext_descriptor *xor_ex; - struct ioat_pq_descriptor *pq; - struct ioat_pq_ext_descriptor *pq_ex; 
- struct ioat_pq_update_descriptor *pqu; - struct ioat_raw_descriptor *raw; - }; - size_t len; - struct dma_async_tx_descriptor txd; - enum sum_check_flags *result; - #ifdef DEBUG - int id; - #endif - struct ioat_sed_ent *sed; -}; - -static inline struct ioat_ring_ent * -ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) -{ - return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)]; -} - -static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) -{ - struct ioat_chan_common *chan = &ioat->base; - - writel(addr & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(addr >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); -} - -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); -int ioat2_enumerate_channels(struct ioatdma_device *device); -struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags); -void ioat2_issue_pending(struct dma_chan *chan); -int ioat2_alloc_chan_resources(struct dma_chan *c); -void ioat2_free_chan_resources(struct dma_chan *c); -void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); -bool reshape_ring(struct ioat2_dma_chan *ioat, int order); -void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); -void ioat2_cleanup_event(unsigned long data); -void ioat2_timer_event(unsigned long data); -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); -extern struct kobj_type ioat2_ktype; -extern struct kmem_cache *ioat2_cache; -#endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c deleted file mode 100644 index 64790a45e..000000000 --- a/drivers/dma/ioat/dma_v3.c +++ /dev/null @@ -1,1717 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * BSD LICENSE - * - * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Support routines for v3+ hardware - */ -#include -#include -#include -#include -#include -#include -#include "../dmaengine.h" -#include "registers.h" -#include "hw.h" -#include "dma.h" -#include "dma_v2.h" - -extern struct kmem_cache *ioat3_sed_cache; - -/* ioat hardware assumes at least two sources for raid operations */ -#define src_cnt_to_sw(x) ((x) + 2) -#define src_cnt_to_hw(x) ((x) - 2) -#define ndest_to_sw(x) ((x) + 1) -#define ndest_to_hw(x) ((x) - 1) -#define src16_cnt_to_sw(x) ((x) + 9) -#define src16_cnt_to_hw(x) ((x) - 9) - -/* provide a lookup table for setting the source address in the base or - * extended descriptor of an xor or pq descriptor - */ -static const u8 xor_idx_to_desc = 0xe0; -static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; -static const u8 pq_idx_to_desc = 0xf8; -static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2 }; -static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; -static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6 }; - -static void ioat3_eh(struct ioat2_dma_chan *ioat); - -static void xor_set_src(struct ioat_raw_descriptor *descs[2], - dma_addr_t addr, u32 offset, int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - raw->field[xor_idx_to_field[idx]] = addr + offset; -} - -static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; - - return raw->field[pq_idx_to_field[idx]]; -} - -static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) -{ - struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; - - return raw->field[pq16_idx_to_field[idx]]; -} - -static void pq_set_src(struct ioat_raw_descriptor *descs[2], - dma_addr_t addr, u32 offset, u8 coef, int idx) -{ - struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; - struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; - - raw->field[pq_idx_to_field[idx]] = addr + offset; - pq->coef[idx] = coef; -} - -static bool is_jf_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_JSF0: - case PCI_DEVICE_ID_INTEL_IOAT_JSF1: - case PCI_DEVICE_ID_INTEL_IOAT_JSF2: - case PCI_DEVICE_ID_INTEL_IOAT_JSF3: - case PCI_DEVICE_ID_INTEL_IOAT_JSF4: - case PCI_DEVICE_ID_INTEL_IOAT_JSF5: - case PCI_DEVICE_ID_INTEL_IOAT_JSF6: - case PCI_DEVICE_ID_INTEL_IOAT_JSF7: - case PCI_DEVICE_ID_INTEL_IOAT_JSF8: - case 
PCI_DEVICE_ID_INTEL_IOAT_JSF9: - return true; - default: - return false; - } -} - -static bool is_snb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_SNB0: - case PCI_DEVICE_ID_INTEL_IOAT_SNB1: - case PCI_DEVICE_ID_INTEL_IOAT_SNB2: - case PCI_DEVICE_ID_INTEL_IOAT_SNB3: - case PCI_DEVICE_ID_INTEL_IOAT_SNB4: - case PCI_DEVICE_ID_INTEL_IOAT_SNB5: - case PCI_DEVICE_ID_INTEL_IOAT_SNB6: - case PCI_DEVICE_ID_INTEL_IOAT_SNB7: - case PCI_DEVICE_ID_INTEL_IOAT_SNB8: - case PCI_DEVICE_ID_INTEL_IOAT_SNB9: - return true; - default: - return false; - } -} - -static bool is_ivb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_IVB0: - case PCI_DEVICE_ID_INTEL_IOAT_IVB1: - case PCI_DEVICE_ID_INTEL_IOAT_IVB2: - case PCI_DEVICE_ID_INTEL_IOAT_IVB3: - case PCI_DEVICE_ID_INTEL_IOAT_IVB4: - case PCI_DEVICE_ID_INTEL_IOAT_IVB5: - case PCI_DEVICE_ID_INTEL_IOAT_IVB6: - case PCI_DEVICE_ID_INTEL_IOAT_IVB7: - case PCI_DEVICE_ID_INTEL_IOAT_IVB8: - case PCI_DEVICE_ID_INTEL_IOAT_IVB9: - return true; - default: - return false; - } - -} - -static bool is_hsw_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_HSW0: - case PCI_DEVICE_ID_INTEL_IOAT_HSW1: - case PCI_DEVICE_ID_INTEL_IOAT_HSW2: - case PCI_DEVICE_ID_INTEL_IOAT_HSW3: - case PCI_DEVICE_ID_INTEL_IOAT_HSW4: - case PCI_DEVICE_ID_INTEL_IOAT_HSW5: - case PCI_DEVICE_ID_INTEL_IOAT_HSW6: - case PCI_DEVICE_ID_INTEL_IOAT_HSW7: - case PCI_DEVICE_ID_INTEL_IOAT_HSW8: - case PCI_DEVICE_ID_INTEL_IOAT_HSW9: - return true; - default: - return false; - } - -} - -static bool is_xeon_cb32(struct pci_dev *pdev) -{ - return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || - is_hsw_ioat(pdev); -} - -static bool is_bwd_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_BWD0: - case PCI_DEVICE_ID_INTEL_IOAT_BWD1: - case PCI_DEVICE_ID_INTEL_IOAT_BWD2: - case PCI_DEVICE_ID_INTEL_IOAT_BWD3: - /* even though not Atom, BDX-DE has same DMA silicon */ - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: - return true; - default: - return false; - } -} - -static bool is_bwd_noraid(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_BWD2: - case PCI_DEVICE_ID_INTEL_IOAT_BWD3: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: - return true; - default: - return false; - } - -} - -static void pq16_set_src(struct ioat_raw_descriptor *desc[3], - dma_addr_t addr, u32 offset, u8 coef, unsigned idx) -{ - struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; - struct ioat_pq16a_descriptor *pq16 = - (struct ioat_pq16a_descriptor *)desc[1]; - struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; - - raw->field[pq16_idx_to_field[idx]] = addr + offset; - - if (idx < 8) - pq->coef[idx] = coef; - else - pq16->coef[idx - 8] = coef; -} - -static struct ioat_sed_ent * -ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) -{ - struct ioat_sed_ent *sed; - gfp_t flags = __GFP_ZERO | GFP_ATOMIC; - - sed = kmem_cache_alloc(ioat3_sed_cache, flags); - if (!sed) - return NULL; - - sed->hw_pool = hw_pool; - sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], - flags, &sed->dma); - if (!sed->hw) { - kmem_cache_free(ioat3_sed_cache, sed); - return NULL; - } - - return sed; -} - 
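A note on the allocation pattern in ioat3_alloc_sed() above and its
ioat3_free_sed() counterpart just below: each super extended descriptor
pairs a CPU-side wrapper from a kmem_cache with a hardware-visible block
from a dma_pool, so a failure of the second allocation has to unwind the
first, and teardown releases the two in reverse order. A minimal sketch of
that two-level pattern follows; all names here are hypothetical, not taken
from the driver.

#include <linux/dmapool.h>
#include <linux/slab.h>

struct demo_sed {
	void *hw;	/* CPU pointer to the device-visible block */
	dma_addr_t dma;	/* bus address the engine is programmed with */
};

static struct demo_sed *demo_sed_alloc(struct kmem_cache *cache,
				       struct dma_pool *pool, gfp_t flags)
{
	struct demo_sed *sed = kmem_cache_alloc(cache, flags);

	if (!sed)
		return NULL;
	/* dma_pool_alloc() returns the CPU pointer and fills in the
	 * dma_addr_t that names the same memory to the hardware */
	sed->hw = dma_pool_alloc(pool, flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(cache, sed);	/* unwind the wrapper */
		return NULL;
	}
	return sed;
}

static void demo_sed_free(struct kmem_cache *cache, struct dma_pool *pool,
			  struct demo_sed *sed)
{
	if (!sed)
		return;
	dma_pool_free(pool, sed->hw, sed->dma);
	kmem_cache_free(cache, sed);
}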
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) -{ - if (!sed) - return; - - dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(ioat3_sed_cache, sed); -} - -static bool desc_has_ext(struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - if (hw->ctl_f.op == IOAT_OP_XOR || - hw->ctl_f.op == IOAT_OP_XOR_VAL) { - struct ioat_xor_descriptor *xor = desc->xor; - - if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) - return true; - } else if (hw->ctl_f.op == IOAT_OP_PQ || - hw->ctl_f.op == IOAT_OP_PQ_VAL) { - struct ioat_pq_descriptor *pq = desc->pq; - - if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) - return true; - } - - return false; -} - -static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) -{ - u64 phys_complete; - u64 completion; - - completion = *chan->completion; - phys_complete = ioat_chansts_to_addr(completion); - - dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, - (unsigned long long) phys_complete); - - return phys_complete; -} - -static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, - u64 *phys_complete) -{ - *phys_complete = ioat3_get_current_completion(chan); - if (*phys_complete == chan->last_completion) - return false; - - clear_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - return true; -} - -static void -desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - switch (hw->ctl_f.op) { - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - { - struct ioat_pq_descriptor *pq = desc->pq; - - /* check if there's error written */ - if (!pq->dwbes_f.wbes) - return; - - /* need to set a chanerr var for checking to clear later */ - - if (pq->dwbes_f.p_val_err) - *desc->result |= SUM_CHECK_P_RESULT; - - if (pq->dwbes_f.q_val_err) - *desc->result |= SUM_CHECK_Q_RESULT; - - return; - } - default: - return; - } -} - -/** - * __cleanup - reclaim used descriptors - * @ioat: channel (ring) to clean - * - * The difference from the dma_v2.c __cleanup() is that this routine - * handles extended descriptors and dma-unmapping raid operations. - */ -static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) -{ - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - bool seen_current = false; - int idx = ioat->tail, i; - u16 active; - - dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - - /* - * At restart of the channel, the completion address and the - * channel status will be 0 due to starting a new chain. Since - * it's new chain and the first descriptor "fails", there is - * nothing to clean up. We do not want to reap the entire submitted - * chain due to this 0 address value and then BUG. 
- */ - if (!phys_complete) - return; - - active = ioat2_ring_active(ioat); - for (i = 0; i < active && !seen_current; i++) { - struct dma_async_tx_descriptor *tx; - - smp_read_barrier_depends(); - prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); - desc = ioat2_get_ring_ent(ioat, idx + i); - dump_desc_dbg(ioat, desc); - - /* set err stat if we are using dwbes */ - if (device->cap & IOAT_CAP_DWBES) - desc_get_errstat(ioat, desc); - - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys == phys_complete) - seen_current = true; - - /* skip extended descriptors */ - if (desc_has_ext(desc)) { - BUG_ON(i + 1 >= active); - i++; - } - - /* cleanup super extended descriptors */ - if (desc->sed) { - ioat3_free_sed(device, desc->sed); - desc->sed = NULL; - } - } - smp_mb(); /* finish all descriptor reads before incrementing tail */ - ioat->tail = idx + i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ - chan->last_completion = phys_complete; - - if (active - i == 0) { - dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } - /* 5 microsecond delay per pending descriptor */ - writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), - chan->device->reg_base + IOAT_INTRDELAY_OFFSET); -} - -static void ioat3_cleanup(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - u64 phys_complete; - - spin_lock_bh(&chan->cleanup_lock); - - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - if (is_ioat_halted(*chan->completion)) { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - - if (chanerr & IOAT_CHANERR_HANDLE_MASK) { - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - ioat3_eh(ioat); - } - } - - spin_unlock_bh(&chan->cleanup_lock); -} - -static void ioat3_cleanup_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat3_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) - return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); -} - -static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - u64 phys_complete; - - ioat2_quiesce(chan, 0); - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - __ioat2_restart_chan(ioat); -} - -static void ioat3_eh(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = to_pdev(chan); - struct ioat_dma_descriptor *hw; - struct dma_async_tx_descriptor *tx; - u64 phys_complete; - struct ioat_ring_ent *desc; - u32 err_handled = 0; - u32 chanerr_int; - u32 chanerr; - - /* cleanup so tail points to descriptor that caused the error */ - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); - - dev_dbg(to_dev(chan), "%s: error = %x:%x\n", - __func__, chanerr, chanerr_int); - - desc = ioat2_get_ring_ent(ioat, ioat->tail); - hw = desc->hw; - dump_desc_dbg(ioat, desc); - - switch (hw->ctl_f.op) { - case IOAT_OP_XOR_VAL: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= 
SUM_CHECK_P_RESULT; - err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - break; - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= SUM_CHECK_P_RESULT; - err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { - *desc->result |= SUM_CHECK_Q_RESULT; - err_handled |= IOAT_CHANERR_XOR_Q_ERR; - } - break; - } - - /* fault on unhandled error or spurious halt */ - if (chanerr ^ err_handled || chanerr == 0) { - dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", - __func__, chanerr, err_handled); - BUG(); - } else { /* cleanup the faulty descriptor */ - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - } - - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); - - /* mark faulting descriptor as complete */ - *chan->completion = desc->txd.phys; - - spin_lock_bh(&ioat->prep_lock); - ioat3_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); -} - -static void check_active(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - if (ioat2_ring_active(ioat)) { - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - return; - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - else if (ioat->alloc_order > ioat_get_alloc_order()) { - /* if the ring is idle, empty, and oversized try to step - * down the size - */ - reshape_ring(ioat, ioat->alloc_order - 1); - - /* keep shrinking until we get back to our minimum - * default size - */ - if (ioat->alloc_order > ioat_get_alloc_order()) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } - -} - -static void ioat3_timer_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - u64 status; - - status = ioat_chansts(chan); - - /* when halted due to errors check for channel - * programming errors before advancing the completion state - */ - if (is_ioat_halted(status)) { - u32 chanerr; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "%s: Channel halted (%x)\n", - __func__, chanerr); - if (test_bit(IOAT_RUN, &chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; - } - - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { - spin_lock_bh(&ioat->prep_lock); - ioat3_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - return; - } else { - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - - if (ioat2_ring_active(ioat)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat->prep_lock); - check_active(ioat); - spin_unlock_bh(&ioat->prep_lock); - } - spin_unlock_bh(&chan->cleanup_lock); -} - -static enum dma_status -ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - enum dma_status ret; - - 
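/*
 * [Editor's annotation -- not part of the original file] This is the usual
 * dmaengine polling shape: query the cookie first, and only if it has not
 * completed run the cleanup path to reap whatever the hardware has already
 * finished, then query the cookie again. A caller polling for status
 * therefore also drives completion processing forward.
 */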
ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - ioat3_cleanup(ioat); - - return dma_cookie_status(c, cookie, txstate); -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, - dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_ring_ent *compl_desc; - struct ioat_ring_ent *desc; - struct ioat_ring_ent *ext; - size_t total_len = len; - struct ioat_xor_descriptor *xor; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - struct ioat_dma_descriptor *hw; - int num_descs, with_ext, idx, i; - u32 offset = 0; - u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; - - BUG_ON(src_cnt < 2); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - /* we need 2x the number of descriptors to cover greater than 5 - * sources - */ - if (src_cnt > 5) { - with_ext = 1; - num_descs *= 2; - } else - with_ext = 0; - - /* completion writes from the raid engine may pass completion - * writes from the legacy engine, so we need one extra null - * (legacy) descriptor to ensure all completion writes arrive in - * order. - */ - if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - struct ioat_raw_descriptor *descs[2]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - int s; - - desc = ioat2_get_ring_ent(ioat, idx + i); - xor = desc->xor; - - /* save a branch by unconditionally retrieving the - * extended descriptor xor_set_src() knows to not write - * to it in the single descriptor case - */ - ext = ioat2_get_ring_ent(ioat, idx + i + 1); - xor_ex = ext->xor_ex; - - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (s = 0; s < src_cnt; s++) - xor_set_src(descs, src[s], offset, s); - xor->size = xfer_size; - xor->dst_addr = dest + offset; - xor->ctl = 0; - xor->ctl_f.op = op; - xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); - - len -= xfer_size; - offset += xfer_size; - dump_desc_dbg(ioat, desc); - } while ((i += 1 + with_ext) < num_descs); - - /* last xor descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); - - /* we leave the channel locked to ensure in order submission */ - return &compl_desc->txd; -} - -static struct dma_async_tx_descriptor * -ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, - unsigned int src_cnt, size_t len, unsigned long flags) -{ - return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, - unsigned int src_cnt, size_t len, - enum sum_check_flags *result, unsigned long flags) -{ - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... 
so clear it here - */ - *result = 0; - - return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], - src_cnt - 1, len, flags); -} - -static void -dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) -{ - struct device *dev = to_dev(&ioat->base); - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; - struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - int i; - - dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" - " src_cnt: %d)\n", - desc_id(desc), (unsigned long long) desc->txd.phys, - (unsigned long long) (pq_ex ? pq_ex->next : pq->next), - desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, - pq->ctl_f.compl_write, - pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", - pq->ctl_f.src_cnt); - for (i = 0; i < src_cnt; i++) - dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, - (unsigned long long) pq_get_src(descs, i), pq->coef[i]); - dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); - dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); - dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); -} - -static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc) -{ - struct device *dev = to_dev(&ioat->base); - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_raw_descriptor *descs[] = { (void *)pq, - (void *)pq, - (void *)pq }; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - int i; - - if (desc->sed) { - descs[1] = (void *)desc->sed->hw; - descs[2] = (void *)desc->sed->hw + 64; - } - - dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" - " src_cnt: %d)\n", - desc_id(desc), (unsigned long long) desc->txd.phys, - (unsigned long long) pq->next, - desc->txd.flags, pq->size, pq->ctl, - pq->ctl_f.op, pq->ctl_f.int_en, - pq->ctl_f.compl_write, - pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", - pq->ctl_f.src_cnt); - for (i = 0; i < src_cnt; i++) { - dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, - (unsigned long long) pq16_get_src(descs, i), - pq->coef[i]); - } - dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); - dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, - const dma_addr_t *dst, const dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *compl_desc; - struct ioat_ring_ent *desc; - struct ioat_ring_ent *ext; - size_t total_len = len; - struct ioat_pq_descriptor *pq; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - struct ioat_dma_descriptor *hw; - u32 offset = 0; - u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; - int i, s, idx, with_ext, num_descs; - int cb32 = (device->version < IOAT_VER_3_3) ? 
1 : 0; - - dev_dbg(to_dev(chan), "%s\n", __func__); - /* the engine requires at least two sources (we provide - * at least 1 implied source in the DMA_PREP_CONTINUE case) - */ - BUG_ON(src_cnt + dmaf_continue(flags) < 2); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - /* we need 2x the number of descriptors to cover greater than 3 - * sources (we need 1 extra source in the q-only continuation - * case and 3 extra sources in the p+q continuation case. - */ - if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || - (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { - with_ext = 1; - num_descs *= 2; - } else - with_ext = 0; - - /* completion writes from the raid engine may pass completion - * writes from the legacy engine, so we need one extra null - * (legacy) descriptor to ensure all completion writes arrive in - * order. - */ - if (likely(num_descs) && - ioat2_check_space_lock(ioat, num_descs + cb32) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - struct ioat_raw_descriptor *descs[2]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - pq = desc->pq; - - /* save a branch by unconditionally retrieving the - * extended descriptor pq_set_src() knows to not write - * to it in the single descriptor case - */ - ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); - pq_ex = ext->pq_ex; - - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - - for (s = 0; s < src_cnt; s++) - pq_set_src(descs, src[s], offset, scf[s], s); - - /* see the comment for dma_maxpq in include/linux/dmaengine.h */ - if (dmaf_p_disabled_continue(flags)) - pq_set_src(descs, dst[1], offset, 1, s++); - else if (dmaf_continue(flags)) { - pq_set_src(descs, dst[0], offset, 0, s++); - pq_set_src(descs, dst[1], offset, 1, s++); - pq_set_src(descs, dst[1], offset, 0, s++); - } - pq->size = xfer_size; - pq->p_addr = dst[0] + offset; - pq->q_addr = dst[1] + offset; - pq->ctl = 0; - pq->ctl_f.op = op; - /* we turn on descriptor write back error status */ - if (device->cap & IOAT_CAP_DWBES) - pq->ctl_f.wb_en = result ? 
1 : 0; - pq->ctl_f.src_cnt = src_cnt_to_hw(s); - pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); - pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); - - len -= xfer_size; - offset += xfer_size; - } while ((i += 1 + with_ext) < num_descs); - - /* last pq descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - dump_pq_desc_dbg(ioat, desc, ext); - - if (!cb32) { - pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - pq->ctl_f.compl_write = 1; - compl_desc = desc; - } else { - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); - } - - - /* we leave the channel locked to ensure in order submission */ - return &compl_desc->txd; -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, - const dma_addr_t *dst, const dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - size_t total_len = len; - struct ioat_pq_descriptor *pq; - u32 offset = 0; - u8 op; - int i, s, idx, num_descs; - - /* this function is only called with 9-16 sources */ - op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; - - dev_dbg(to_dev(chan), "%s\n", __func__); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - - /* - * 16 source pq is only available on cb3.3 and has no completion - * write hw bug. - */ - if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) - idx = ioat->head; - else - return NULL; - - i = 0; - - do { - struct ioat_raw_descriptor *descs[4]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - pq = desc->pq; - - descs[0] = (struct ioat_raw_descriptor *) pq; - - desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); - if (!desc->sed) { - dev_err(to_dev(chan), - "%s: no free sed entries\n", __func__); - return NULL; - } - - pq->sed_addr = desc->sed->dma; - desc->sed->parent = desc; - - descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; - descs[2] = (void *)descs[1] + 64; - - for (s = 0; s < src_cnt; s++) - pq16_set_src(descs, src[s], offset, scf[s], s); - - /* see the comment for dma_maxpq in include/linux/dmaengine.h */ - if (dmaf_p_disabled_continue(flags)) - pq16_set_src(descs, dst[1], offset, 1, s++); - else if (dmaf_continue(flags)) { - pq16_set_src(descs, dst[0], offset, 0, s++); - pq16_set_src(descs, dst[1], offset, 1, s++); - pq16_set_src(descs, dst[1], offset, 0, s++); - } - - pq->size = xfer_size; - pq->p_addr = dst[0] + offset; - pq->q_addr = dst[1] + offset; - pq->ctl = 0; - pq->ctl_f.op = op; - pq->ctl_f.src_cnt = src16_cnt_to_hw(s); - /* we turn on descriptor write back error status */ - if (device->cap & IOAT_CAP_DWBES) - pq->ctl_f.wb_en = result ? 
1 : 0; - pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); - pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); - - len -= xfer_size; - offset += xfer_size; - } while (++i < num_descs); - - /* last pq descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - - /* with cb3.3 we should be able to do completion w/o a null desc */ - pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - pq->ctl_f.compl_write = 1; - - dump_pq16_desc_dbg(ioat, desc); - - /* we leave the channel locked to ensure in order submission */ - return &desc->txd; -} - -static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) -{ - if (dmaf_p_disabled_continue(flags)) - return src_cnt + 1; - else if (dmaf_continue(flags)) - return src_cnt + 3; - else - return src_cnt; -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - unsigned long flags) -{ - /* specify valid address for disabled result */ - if (flags & DMA_PREP_PQ_DISABLE_P) - dst[0] = dst[1]; - if (flags & DMA_PREP_PQ_DISABLE_Q) - dst[1] = dst[0]; - - /* handle the single source multiply case from the raid6 - * recovery path - */ - if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { - dma_addr_t single_source[2]; - unsigned char single_source_coef[2]; - - BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); - single_source[0] = src[0]; - single_source[1] = src[0]; - single_source_coef[0] = scf[0]; - single_source_coef[1] = 0; - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, - 2, single_source_coef, len, - flags) : - __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, - single_source_coef, len, flags); - - } else { - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, - scf, len, flags) : - __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, - scf, len, flags); - } -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - enum sum_check_flags *pqres, unsigned long flags) -{ - /* specify valid address for disabled result */ - if (flags & DMA_PREP_PQ_DISABLE_P) - pq[0] = pq[1]; - if (flags & DMA_PREP_PQ_DISABLE_Q) - pq[1] = pq[0]; - - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... so clear it here - */ - *pqres = 0; - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags) : - __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, - unsigned int src_cnt, size_t len, unsigned long flags) -{ - unsigned char scf[src_cnt]; - dma_addr_t pq[2]; - - memset(scf, 0, src_cnt); - pq[0] = dst; - flags |= DMA_PREP_PQ_DISABLE_Q; - pq[1] = dst; /* specify valid address for disabled result */ - - return src_cnt_flags(src_cnt, flags) > 8 ? 
- __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags) : - __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, - unsigned int src_cnt, size_t len, - enum sum_check_flags *result, unsigned long flags) -{ - unsigned char scf[src_cnt]; - dma_addr_t pq[2]; - - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... so clear it here - */ - *result = 0; - - memset(scf, 0, src_cnt); - pq[0] = src[0]; - flags |= DMA_PREP_PQ_DISABLE_Q; - pq[1] = pq[0]; /* specify valid address for disabled result */ - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, - scf, len, flags) : - __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, - scf, len, flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_ring_ent *desc; - struct ioat_dma_descriptor *hw; - - if (ioat2_check_space_lock(ioat, 1) == 0) - desc = ioat2_get_ring_ent(ioat, ioat->head); - else - return NULL; - - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - - desc->txd.flags = flags; - desc->len = 1; - - dump_desc_dbg(ioat, desc); - - /* we leave the channel locked to ensure in order submission */ - return &desc->txd; -} - -static void ioat3_dma_test_callback(void *dma_async_param) -{ - struct completion *cmp = dma_async_param; - - complete(cmp); -} - -#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ -static int ioat_xor_val_self_test(struct ioatdma_device *device) -{ - int i, src_idx; - struct page *dest; - struct page *xor_srcs[IOAT_NUM_SRC_TEST]; - struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; - dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; - dma_addr_t dest_dma; - struct dma_async_tx_descriptor *tx; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - u8 cmp_byte = 0; - u32 cmp_word; - u32 xor_val_result; - int err = 0; - struct completion cmp; - unsigned long tmo; - struct device *dev = &device->pdev->dev; - struct dma_device *dma = &device->common; - u8 op = 0; - - dev_dbg(dev, "%s\n", __func__); - - if (!dma_has_cap(DMA_XOR, dma->cap_mask)) - return 0; - - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { - xor_srcs[src_idx] = alloc_page(GFP_KERNEL); - if (!xor_srcs[src_idx]) { - while (src_idx--) - __free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - } - - dest = alloc_page(GFP_KERNEL); - if (!dest) { - while (src_idx--) - __free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - - /* Fill in src buffers */ - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { - u8 *ptr = page_address(xor_srcs[src_idx]); - for (i = 0; i < PAGE_SIZE; i++) - ptr[i] = (1 << src_idx); - } - - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) - cmp_byte ^= (u8) (1 << src_idx); - - cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | - (cmp_byte << 8) | cmp_byte; - - memset(page_address(dest), 0, PAGE_SIZE); - - dma_chan = container_of(dma->channels.next, struct dma_chan, - device_node); - if (dma->device_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - /* test xor */ - op = IOAT_OP_XOR; - - dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, 
DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dest_dma)) - goto dma_unmap; - - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { - dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT); - - if (!tx) { - dev_err(dev, "Self-test xor prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test xor setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test xor timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { - u32 *ptr = page_address(dest); - if (ptr[i] != cmp_word) { - dev_err(dev, "Self-test xor failed compare\n"); - err = -ENODEV; - goto free_resources; - } - } - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - - dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - - /* skip validate if the capability is not present */ - if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) - goto free_resources; - - op = IOAT_OP_XOR_VAL; - - /* validate the sources with the destintation page */ - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - xor_val_srcs[i] = xor_srcs[i]; - xor_val_srcs[i] = dest; - - xor_val_result = 1; - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { - dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, - IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); - if (!tx) { - dev_err(dev, "Self-test zero prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test zero setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test validate timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - if (xor_val_result != 0) { - dev_err(dev, "Self-test validate failed compare\n"); - err = -ENODEV; - goto free_resources; - } - - memset(page_address(dest), 0, PAGE_SIZE); - - /* test for non-zero parity sum */ - op = IOAT_OP_XOR_VAL; - - xor_val_result = 0; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { - dma_srcs[i] = dma_map_page(dev, 
xor_val_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, - IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); - if (!tx) { - dev_err(dev, "Self-test 2nd zero prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test 2nd zero setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test 2nd validate timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - if (xor_val_result != SUM_CHECK_P_RESULT) { - dev_err(dev, "Self-test validate failed compare\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - goto free_resources; -dma_unmap: - if (op == IOAT_OP_XOR) { - if (dest_dma != DMA_ERROR_CODE) - dma_unmap_page(dev, dest_dma, PAGE_SIZE, - DMA_FROM_DEVICE); - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); - } else if (op == IOAT_OP_XOR_VAL) { - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); - } -free_resources: - dma->device_free_chan_resources(dma_chan); -out: - src_idx = IOAT_NUM_SRC_TEST; - while (src_idx--) - __free_page(xor_srcs[src_idx]); - __free_page(dest); - return err; -} - -static int ioat3_dma_self_test(struct ioatdma_device *device) -{ - int rc = ioat_dma_self_test(device); - - if (rc) - return rc; - - rc = ioat_xor_val_self_test(device); - if (rc) - return rc; - - return 0; -} - -static int ioat3_irq_reinit(struct ioatdma_device *device) -{ - struct pci_dev *pdev = device->pdev; - int irq = pdev->irq, i; - - if (!is_bwd_ioat(pdev)) - return 0; - - switch (device->irq_mode) { - case IOAT_MSIX: - for (i = 0; i < device->common.chancnt; i++) { - struct msix_entry *msix = &device->msix_entries[i]; - struct ioat_chan_common *chan; - - chan = ioat_chan_by_index(device, i); - devm_free_irq(&pdev->dev, msix->vector, chan); - } - - pci_disable_msix(pdev); - break; - case IOAT_MSI: - pci_disable_msi(pdev); - /* fall through */ - case IOAT_INTX: - devm_free_irq(&pdev->dev, irq, device); - break; - default: - return 0; - } - device->irq_mode = IOAT_NOIRQ; - - return ioat_dma_setup_interrupts(device); -} - -static int ioat3_reset_hw(struct ioat_chan_common *chan) -{ - /* throw away whatever the channel was doing and get it - * initialized, with ioat3 specific workarounds - */ - struct ioatdma_device *device = chan->device; - struct pci_dev *pdev = device->pdev; - u32 chanerr; - u16 dev_id; - int err; - - ioat2_quiesce(chan, msecs_to_jiffies(100)); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - - if (device->version < IOAT_VER_3_3) { - /* clear any pending errors */ - err = pci_read_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); - if (err) { - dev_err(&pdev->dev, - "channel error register unreachable\n"); - return err; - } - pci_write_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, 
chanerr); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - pci_write_config_dword(pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - 0x10); - } - } - - err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); - if (!err) - err = ioat3_irq_reinit(device); - - if (err) - dev_err(&pdev->dev, "Failed to reset: %d\n", err); - - return err; -} - -static void ioat3_intr_quirk(struct ioatdma_device *device) -{ - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - u32 errmask; - - dma = &device->common; - - /* - * if we have descriptor write back error status, we mask the - * error interrupts - */ - if (device->cap & IOAT_CAP_DWBES) { - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - errmask = readl(chan->reg_base + - IOAT_CHANERR_MASK_OFFSET); - errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | - IOAT_CHANERR_XOR_Q_ERR; - writel(errmask, chan->reg_base + - IOAT_CHANERR_MASK_OFFSET); - } - } -} - -int ioat3_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - int dca_en = system_has_dca_enabled(pdev); - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - bool is_raid_device = false; - int err; - - device->enumerate_channels = ioat2_enumerate_channels; - device->reset_hw = ioat3_reset_hw; - device->self_test = ioat3_dma_self_test; - device->intr_quirk = ioat3_intr_quirk; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; - dma->device_issue_pending = ioat2_issue_pending; - dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; - dma->device_free_chan_resources = ioat2_free_chan_resources; - - dma_cap_set(DMA_INTERRUPT, dma->cap_mask); - dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; - - device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); - - if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) - device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); - - /* dca is incompatible with raid operations */ - if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) - device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); - - if (device->cap & IOAT_CAP_XOR) { - is_raid_device = true; - dma->max_xor = 8; - - dma_cap_set(DMA_XOR, dma->cap_mask); - dma->device_prep_dma_xor = ioat3_prep_xor; - - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); - dma->device_prep_dma_xor_val = ioat3_prep_xor_val; - } - - if (device->cap & IOAT_CAP_PQ) { - is_raid_device = true; - - dma->device_prep_dma_pq = ioat3_prep_pq; - dma->device_prep_dma_pq_val = ioat3_prep_pq_val; - dma_cap_set(DMA_PQ, dma->cap_mask); - dma_cap_set(DMA_PQ_VAL, dma->cap_mask); - - if (device->cap & IOAT_CAP_RAID16SS) { - dma_set_maxpq(dma, 16, 0); - } else { - dma_set_maxpq(dma, 8, 0); - } - - if (!(device->cap & IOAT_CAP_XOR)) { - dma->device_prep_dma_xor = ioat3_prep_pqxor; - dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; - dma_cap_set(DMA_XOR, dma->cap_mask); - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); - - if (device->cap & IOAT_CAP_RAID16SS) { - dma->max_xor = 16; - } else { - dma->max_xor = 8; - } - } - } - - dma->device_tx_status = ioat3_tx_status; - device->cleanup_fn = ioat3_cleanup_event; - device->timer_fn = ioat3_timer_event; - - /* starting with CB3.3 super extended descriptors are supported */ - if (device->cap & IOAT_CAP_RAID16SS) { - char pool_name[14]; - int i; - - for (i = 0; i < 
MAX_SED_POOLS; i++) { - snprintf(pool_name, 14, "ioat_hw%d_sed", i); - - /* allocate SED DMA pool */ - device->sed_hw_pool[i] = dmam_pool_create(pool_name, - &pdev->dev, - SED_SIZE * (i + 1), 64, 0); - if (!device->sed_hw_pool[i]) - return -ENOMEM; - - } - } - - err = ioat_probe(device); - if (err) - return err; - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - - ioat_kobject_add(device, &ioat2_ktype); - - if (dca) - device->dca = ioat3_dca_init(pdev, device->reg_base); - - return 0; -} diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index a3e731edc..690e3b4f8 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -21,11 +21,6 @@ #define IOAT_MMIO_BAR 0 /* CB device ID's */ -#define IOAT_PCI_DID_5000 0x1A38 -#define IOAT_PCI_DID_CNB 0x360B -#define IOAT_PCI_DID_SCNB 0x65FF -#define IOAT_PCI_DID_SNB 0x402F - #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 @@ -58,6 +53,17 @@ #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX0 0x6f20 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX1 0x6f21 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX2 0x6f22 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX3 0x6f23 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX4 0x6f24 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX5 0x6f25 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX6 0x6f26 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX7 0x6f27 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e +#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c new file mode 100644 index 000000000..1c3c9b0ab --- /dev/null +++ b/drivers/dma/ioat/init.c @@ -0,0 +1,1314 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
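The ioat3_dma_probe() tail above sizes one DMA pool per super-extended-descriptor (SED) size class: pool i holds blocks of SED_SIZE * (i + 1) bytes, and __ioat3_prep_pq16_lock() earlier picks a pool with (src_cnt - 2) >> 3. A minimal standalone sketch of that arithmetic; the shift and the per-pool sizing come from the patch, while the 64-byte SED_SIZE is an assumption for illustration:

	#include <stdio.h>

	#define SED_SIZE 64	/* assumed block granularity, not taken from the patch */

	/* same pool-index math as the ioat3_alloc_sed() call site above */
	static int sed_pool(unsigned int src_cnt)
	{
		return (src_cnt - 2) >> 3;
	}

	int main(void)
	{
		unsigned int n;

		/* the 16-source PQ path services 9..16 sources */
		for (n = 9; n <= 16; n++)
			printf("src_cnt %2u -> pool %d (%d-byte blocks)\n",
			       n, sed_pool(n), SED_SIZE * (sed_pool(n) + 1));
		return 0;
	}

So nine sources still fit the smallest size class, while ten or more step up to the next pool.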
+ * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/prefetch.h> +#include <linux/dca.h> +#include "dma.h" +#include "registers.h" +#include "hw.h" + +#include "../dmaengine.h" + +MODULE_VERSION(IOAT_DMA_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corporation"); + +static struct pci_device_id ioat_pci_tbl[] = { + /* I/OAT v3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + + /* I/OAT v3.2 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) }, + { PCI_VDEVICE(INTEL,
PCI_DEVICE_ID_INTEL_IOAT_BDX6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, + + /* I/OAT v3.3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, + + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); + +static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void ioat_remove(struct pci_dev *pdev); +static void +ioat_init_channel(struct ioatdma_device *ioat_dma, + struct ioatdma_chan *ioat_chan, int idx); +static void ioat_intr_quirk(struct ioatdma_device *ioat_dma); +static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); +static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); + +static int ioat_dca_enabled = 1; +module_param(ioat_dca_enabled, int, 0644); +MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); +int ioat_pending_level = 4; +module_param(ioat_pending_level, int, 0644); +MODULE_PARM_DESC(ioat_pending_level, + "high-water mark for pushing ioat descriptors (default: 4)"); +int ioat_ring_alloc_order = 8; +module_param(ioat_ring_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_alloc_order, + "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)"); +int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; +module_param(ioat_ring_max_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_max_alloc_order, + "ioat+: upper limit for ring size (default: 16)"); +static char ioat_interrupt_style[32] = "msix"; +module_param_string(ioat_interrupt_style, ioat_interrupt_style, + sizeof(ioat_interrupt_style), 0644); +MODULE_PARM_DESC(ioat_interrupt_style, + "set ioat interrupt style: msix (default), msi, intx"); + +struct kmem_cache *ioat_cache; +struct kmem_cache *ioat_sed_cache; + +static bool is_jf_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_JSF0: + case PCI_DEVICE_ID_INTEL_IOAT_JSF1: + case PCI_DEVICE_ID_INTEL_IOAT_JSF2: + case PCI_DEVICE_ID_INTEL_IOAT_JSF3: + case PCI_DEVICE_ID_INTEL_IOAT_JSF4: + case PCI_DEVICE_ID_INTEL_IOAT_JSF5: + case PCI_DEVICE_ID_INTEL_IOAT_JSF6: + case PCI_DEVICE_ID_INTEL_IOAT_JSF7: + case PCI_DEVICE_ID_INTEL_IOAT_JSF8: + case PCI_DEVICE_ID_INTEL_IOAT_JSF9: + return true; + default: + return false; + } +} + +static bool is_snb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_SNB0: + case PCI_DEVICE_ID_INTEL_IOAT_SNB1: + case PCI_DEVICE_ID_INTEL_IOAT_SNB2: + case PCI_DEVICE_ID_INTEL_IOAT_SNB3: + case PCI_DEVICE_ID_INTEL_IOAT_SNB4: + case PCI_DEVICE_ID_INTEL_IOAT_SNB5: + case PCI_DEVICE_ID_INTEL_IOAT_SNB6: + case PCI_DEVICE_ID_INTEL_IOAT_SNB7: + case PCI_DEVICE_ID_INTEL_IOAT_SNB8: + case PCI_DEVICE_ID_INTEL_IOAT_SNB9: + return true; + default: + return false; + } +} + +static bool is_ivb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: + case PCI_DEVICE_ID_INTEL_IOAT_IVB1: + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: + case 
PCI_DEVICE_ID_INTEL_IOAT_IVB5: + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: + return true; + default: + return false; + } + +} + +static bool is_hsw_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_HSW0: + case PCI_DEVICE_ID_INTEL_IOAT_HSW1: + case PCI_DEVICE_ID_INTEL_IOAT_HSW2: + case PCI_DEVICE_ID_INTEL_IOAT_HSW3: + case PCI_DEVICE_ID_INTEL_IOAT_HSW4: + case PCI_DEVICE_ID_INTEL_IOAT_HSW5: + case PCI_DEVICE_ID_INTEL_IOAT_HSW6: + case PCI_DEVICE_ID_INTEL_IOAT_HSW7: + case PCI_DEVICE_ID_INTEL_IOAT_HSW8: + case PCI_DEVICE_ID_INTEL_IOAT_HSW9: + return true; + default: + return false; + } + +} + +static bool is_bdx_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BDX0: + case PCI_DEVICE_ID_INTEL_IOAT_BDX1: + case PCI_DEVICE_ID_INTEL_IOAT_BDX2: + case PCI_DEVICE_ID_INTEL_IOAT_BDX3: + case PCI_DEVICE_ID_INTEL_IOAT_BDX4: + case PCI_DEVICE_ID_INTEL_IOAT_BDX5: + case PCI_DEVICE_ID_INTEL_IOAT_BDX6: + case PCI_DEVICE_ID_INTEL_IOAT_BDX7: + case PCI_DEVICE_ID_INTEL_IOAT_BDX8: + case PCI_DEVICE_ID_INTEL_IOAT_BDX9: + return true; + default: + return false; + } +} + +static bool is_xeon_cb32(struct pci_dev *pdev) +{ + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || + is_hsw_ioat(pdev) || is_bdx_ioat(pdev); +} + +bool is_bwd_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD0: + case PCI_DEVICE_ID_INTEL_IOAT_BWD1: + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + /* even though not Atom, BDX-DE has same DMA silicon */ + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: + return true; + default: + return false; + } +} + +static bool is_bwd_noraid(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: + return true; + default: + return false; + } + +} + +/* + * Perform a IOAT transaction to verify the HW works. + */ +#define IOAT_TEST_SIZE 2000 + +static void ioat_dma_test_callback(void *dma_async_param) +{ + struct completion *cmp = dma_async_param; + + complete(cmp); +} + +/** + * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
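The ID helpers above feed straight into capability filtering: is_xeon_cb32() unions the JSF/SNB/IVB/HSW/BDX families, and is_bwd_noraid() names the BWD/BDX-DE parts that lack RAID offload. A condensed sketch of how ioat3_dma_probe() (later in this file) consumes them; the wrapper function is illustrative only, while the masking itself mirrors the probe code:

	/* illustrative helper; the cap masking mirrors ioat3_dma_probe() below */
	static void ioat_filter_caps(struct ioatdma_device *ioat_dma,
				     struct pci_dev *pdev)
	{
		ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

		/* CB3.2 Xeons and no-RAID Broadwell parts lose XOR/PQ offload */
		if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
			ioat_dma->cap &=
				~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
	}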
+ * @ioat_dma: dma device to be tested + */ +static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) +{ + int i; + u8 *src; + u8 *dest; + struct dma_device *dma = &ioat_dma->dma_dev; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_chan *dma_chan; + struct dma_async_tx_descriptor *tx; + dma_addr_t dma_dest, dma_src; + dma_cookie_t cookie; + int err = 0; + struct completion cmp; + unsigned long tmo; + unsigned long flags; + + src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < IOAT_TEST_SIZE; i++) + src[i] = (u8)i; + + /* Start copy, using first DMA channel */ + dma_chan = container_of(dma->channels.next, struct dma_chan, + device_node); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + dev_err(dev, "selftest cannot allocate chan resource\n"); + err = -ENODEV; + goto out; + } + + dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_src)) { + dev_err(dev, "mapping src buffer failed\n"); + goto free_resources; + } + dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_dest)) { + dev_err(dev, "mapping dest buffer failed\n"); + goto unmap_src; + } + flags = DMA_PREP_INTERRUPT; + tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, + dma_src, IOAT_TEST_SIZE, + flags); + if (!tx) { + dev_err(dev, "Self-test prep failed, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test setup failed, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) + != DMA_COMPLETE) { + dev_err(dev, "Self-test copy timed out, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + if (memcmp(src, dest, IOAT_TEST_SIZE)) { + dev_err(dev, "Self-test copy failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + +unmap_dma: + dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); +unmap_src: + dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); +free_resources: + dma->device_free_chan_resources(dma_chan); +out: + kfree(src); + kfree(dest); + return err; +} + +/** + * ioat_dma_setup_interrupts - setup interrupt handler + * @ioat_dma: ioat dma device + */ +int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) +{ + struct ioatdma_chan *ioat_chan; + struct pci_dev *pdev = ioat_dma->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, j, msixcnt; + int err = -EINVAL; + u8 intrctrl = 0; + + if (!strcmp(ioat_interrupt_style, "msix")) + goto msix; + if (!strcmp(ioat_interrupt_style, "msi")) + goto msi; + if (!strcmp(ioat_interrupt_style, "intx")) + goto intx; + dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); + goto err_no_irq; + +msix: + /* The number of MSI-X vectors should equal the number of channels */ + msixcnt = ioat_dma->dma_dev.chancnt; + for (i = 0; i < msixcnt; i++) + ioat_dma->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); + if (err) + goto msi; + + for (i = 0; i < msixcnt; i++) { + msix = 
&ioat_dma->msix_entries[i]; + ioat_chan = ioat_chan_by_index(ioat_dma, i); + err = devm_request_irq(dev, msix->vector, + ioat_dma_do_interrupt_msix, 0, + "ioat-msix", ioat_chan); + if (err) { + for (j = 0; j < i; j++) { + msix = &ioat_dma->msix_entries[j]; + ioat_chan = ioat_chan_by_index(ioat_dma, j); + devm_free_irq(dev, msix->vector, ioat_chan); + } + goto msi; + } + } + intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + ioat_dma->irq_mode = IOAT_MSIX; + goto done; + +msi: + err = pci_enable_msi(pdev); + if (err) + goto intx; + + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, + "ioat-msi", ioat_dma); + if (err) { + pci_disable_msi(pdev); + goto intx; + } + ioat_dma->irq_mode = IOAT_MSI; + goto done; + +intx: + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, + IRQF_SHARED, "ioat-intx", ioat_dma); + if (err) + goto err_no_irq; + + ioat_dma->irq_mode = IOAT_INTX; +done: + if (is_bwd_ioat(pdev)) + ioat_intr_quirk(ioat_dma); + intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; + writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); + return 0; + +err_no_irq: + /* Disable all interrupt generation */ + writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); + ioat_dma->irq_mode = IOAT_NOIRQ; + dev_err(dev, "no usable interrupts\n"); + return err; +} + +static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) +{ + /* Disable all interrupt generation */ + writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); +} + +static int ioat_probe(struct ioatdma_device *ioat_dma) +{ + int err = -ENODEV; + struct dma_device *dma = &ioat_dma->dma_dev; + struct pci_dev *pdev = ioat_dma->pdev; + struct device *dev = &pdev->dev; + + /* DMA coherent memory pool for DMA descriptor allocations */ + ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, + sizeof(struct ioat_dma_descriptor), + 64, 0); + if (!ioat_dma->dma_pool) { + err = -ENOMEM; + goto err_dma_pool; + } + + ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, + sizeof(u64), + SMP_CACHE_BYTES, + SMP_CACHE_BYTES); + + if (!ioat_dma->completion_pool) { + err = -ENOMEM; + goto err_completion_pool; + } + + ioat_enumerate_channels(ioat_dma); + + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->dev = &pdev->dev; + + if (!dma->chancnt) { + dev_err(dev, "channel enumeration error\n"); + goto err_setup_interrupts; + } + + err = ioat_dma_setup_interrupts(ioat_dma); + if (err) + goto err_setup_interrupts; + + err = ioat3_dma_self_test(ioat_dma); + if (err) + goto err_self_test; + + return 0; + +err_self_test: + ioat_disable_interrupts(ioat_dma); +err_setup_interrupts: + pci_pool_destroy(ioat_dma->completion_pool); +err_completion_pool: + pci_pool_destroy(ioat_dma->dma_pool); +err_dma_pool: + return err; +} + +static int ioat_register(struct ioatdma_device *ioat_dma) +{ + int err = dma_async_device_register(&ioat_dma->dma_dev); + + if (err) { + ioat_disable_interrupts(ioat_dma); + pci_pool_destroy(ioat_dma->completion_pool); + pci_pool_destroy(ioat_dma->dma_pool); + } + + return err; +} + +static void ioat_dma_remove(struct ioatdma_device *ioat_dma) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + + ioat_disable_interrupts(ioat_dma); + + ioat_kobject_del(ioat_dma); + + dma_async_device_unregister(dma); + + pci_pool_destroy(ioat_dma->dma_pool); + pci_pool_destroy(ioat_dma->completion_pool); + + INIT_LIST_HEAD(&dma->channels); +} + +/** + * ioat_enumerate_channels - find and initialize the device's channels + * @ioat_dma: the ioat dma device to be enumerated + */ +static int ioat_enumerate_channels(struct 
ioatdma_device *ioat_dma) +{ + struct ioatdma_chan *ioat_chan; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_device *dma = &ioat_dma->dma_dev; + u8 xfercap_log; + int i; + + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); + dma->chancnt = ARRAY_SIZE(ioat_dma->idx); + } + xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_log &= 0x1f; /* bits [4:0] valid */ + if (xfercap_log == 0) + return 0; + dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); + + for (i = 0; i < dma->chancnt; i++) { + ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + if (!ioat_chan) + break; + + ioat_init_channel(ioat_dma, ioat_chan, i); + ioat_chan->xfercap_log = xfercap_log; + spin_lock_init(&ioat_chan->prep_lock); + if (ioat_reset_hw(ioat_chan)) { + i = 0; + break; + } + } + dma->chancnt = i; + return i; +} + +/** + * ioat_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ +static void ioat_free_chan_resources(struct dma_chan *c) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + const int total_descs = 1 << ioat_chan->alloc_order; + int descs; + int i; + + /* Before freeing channel resources first check + * if they have been previously allocated for this channel. + */ + if (!ioat_chan->ring) + return; + + ioat_stop(ioat_chan); + ioat_reset_hw(ioat_chan); + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + descs = ioat_ring_space(ioat_chan); + dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs); + for (i = 0; i < descs; i++) { + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); + ioat_free_ring_ent(desc, c); + } + + if (descs < total_descs) + dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", + total_descs - descs); + + for (i = 0; i < total_descs - descs; i++) { + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i); + dump_desc_dbg(ioat_chan, desc); + ioat_free_ring_ent(desc, c); + } + + kfree(ioat_chan->ring); + ioat_chan->ring = NULL; + ioat_chan->alloc_order = 0; + pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, + ioat_chan->completion_dma); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + ioat_chan->last_completion = 0; + ioat_chan->completion_dma = 0; + ioat_chan->dmacount = 0; +} + +/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring + * @chan: channel to be initialized + */ +static int ioat_alloc_chan_resources(struct dma_chan *c) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent **ring; + u64 status; + int order; + int i = 0; + u32 chanerr; + + /* have we already been set up? 
*/ + if (ioat_chan->ring) + return 1 << ioat_chan->alloc_order; + + /* Setup register to interrupt and write completion status on error */ + writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); + + /* allocate a completion writeback area */ + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ + ioat_chan->completion = + pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, + GFP_KERNEL, &ioat_chan->completion_dma); + if (!ioat_chan->completion) + return -ENOMEM; + + memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); + writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64)ioat_chan->completion_dma) >> 32, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + order = ioat_get_alloc_order(); + ring = ioat_alloc_ring(c, order, GFP_KERNEL); + if (!ring) + return -ENOMEM; + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + ioat_chan->ring = ring; + ioat_chan->head = 0; + ioat_chan->issued = 0; + ioat_chan->tail = 0; + ioat_chan->alloc_order = order; + set_bit(IOAT_RUN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + ioat_start_null_desc(ioat_chan); + + /* check that we got off the ground */ + do { + udelay(1); + status = ioat_chansts(ioat_chan); + } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); + + if (is_ioat_active(status) || is_ioat_idle(status)) + return 1 << ioat_chan->alloc_order; + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + + dev_WARN(to_dev(ioat_chan), + "failed to start channel chanerr: %#x\n", chanerr); + ioat_free_chan_resources(c); + return -EFAULT; +} + +/* common channel initialization */ +static void +ioat_init_channel(struct ioatdma_device *ioat_dma, + struct ioatdma_chan *ioat_chan, int idx) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + struct dma_chan *c = &ioat_chan->dma_chan; + unsigned long data = (unsigned long) c; + + ioat_chan->ioat_dma = ioat_dma; + ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); + spin_lock_init(&ioat_chan->cleanup_lock); + ioat_chan->dma_chan.device = dma; + dma_cookie_init(&ioat_chan->dma_chan); + list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); + ioat_dma->idx[idx] = ioat_chan; + init_timer(&ioat_chan->timer); + ioat_chan->timer.function = ioat_timer_event; + ioat_chan->timer.data = data; + tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); +} + +#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ +static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) +{ + int i, src_idx; + struct page *dest; + struct page *xor_srcs[IOAT_NUM_SRC_TEST]; + struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dest_dma; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u8 cmp_byte = 0; + u32 cmp_word; + u32 xor_val_result; + int err = 0; + struct completion cmp; + unsigned long tmo; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_device *dma = &ioat_dma->dma_dev; + u8 op = 0; + + dev_dbg(dev, "%s\n", __func__); + + if (!dma_has_cap(DMA_XOR, dma->cap_mask)) + return 0; + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + xor_srcs[src_idx] = alloc_page(GFP_KERNEL); + if (!xor_srcs[src_idx]) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + } + + dest = alloc_page(GFP_KERNEL); + if (!dest) { + while (src_idx--) 
+ __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + + /* Fill in src buffers */ + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + u8 *ptr = page_address(xor_srcs[src_idx]); + + for (i = 0; i < PAGE_SIZE; i++) + ptr[i] = (1 << src_idx); + } + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) + cmp_byte ^= (u8) (1 << src_idx); + + cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | + (cmp_byte << 8) | cmp_byte; + + memset(page_address(dest), 0, PAGE_SIZE); + + dma_chan = container_of(dma->channels.next, struct dma_chan, + device_node); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + /* test xor */ + op = IOAT_OP_XOR; + + dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dest_dma)) + goto dma_unmap; + + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { + dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, + IOAT_NUM_SRC_TEST, PAGE_SIZE, + DMA_PREP_INTERRUPT); + + if (!tx) { + dev_err(dev, "Self-test xor prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test xor setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test xor timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { + u32 *ptr = page_address(dest); + + if (ptr[i] != cmp_word) { + dev_err(dev, "Self-test xor failed compare\n"); + err = -ENODEV; + goto free_resources; + } + } + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + /* skip validate if the capability is not present */ + if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) + goto free_resources; + + op = IOAT_OP_XOR_VAL; + + /* validate the sources with the destintation page */ + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + xor_val_srcs[i] = xor_srcs[i]; + xor_val_srcs[i] = dest; + + xor_val_result = 1; + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test zero prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test zero setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = 
wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test validate timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + if (xor_val_result != 0) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto free_resources; + } + + memset(page_address(dest), 0, PAGE_SIZE); + + /* test for non-zero parity sum */ + op = IOAT_OP_XOR_VAL; + + xor_val_result = 0; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test 2nd zero prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test 2nd zero setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test 2nd validate timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + if (xor_val_result != SUM_CHECK_P_RESULT) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + goto free_resources; +dma_unmap: + if (op == IOAT_OP_XOR) { + if (dest_dma != DMA_ERROR_CODE) + dma_unmap_page(dev, dest_dma, PAGE_SIZE, + DMA_FROM_DEVICE); + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } else if (op == IOAT_OP_XOR_VAL) { + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } +free_resources: + dma->device_free_chan_resources(dma_chan); +out: + src_idx = IOAT_NUM_SRC_TEST; + while (src_idx--) + __free_page(xor_srcs[src_idx]); + __free_page(dest); + return err; +} + +static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) +{ + int rc; + + rc = ioat_dma_self_test(ioat_dma); + if (rc) + return rc; + + rc = ioat_xor_val_self_test(ioat_dma); + + return rc; +} + +static void ioat_intr_quirk(struct ioatdma_device *ioat_dma) +{ + struct dma_device *dma; + struct dma_chan *c; + struct ioatdma_chan *ioat_chan; + u32 errmask; + + dma = &ioat_dma->dma_dev; + + /* + * if we have descriptor write back error status, we mask the + * error interrupts + */ + if (ioat_dma->cap & IOAT_CAP_DWBES) { + list_for_each_entry(c, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(c); + errmask = readl(ioat_chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | + IOAT_CHANERR_XOR_Q_ERR; + writel(errmask, ioat_chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + } + } +} + +static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) +{ + struct pci_dev *pdev = 
ioat_dma->pdev; + int dca_en = system_has_dca_enabled(pdev); + struct dma_device *dma; + struct dma_chan *c; + struct ioatdma_chan *ioat_chan; + bool is_raid_device = false; + int err; + + dma = &ioat_dma->dma_dev; + dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat_issue_pending; + dma->device_alloc_chan_resources = ioat_alloc_chan_resources; + dma->device_free_chan_resources = ioat_free_chan_resources; + + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); + dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock; + + ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); + + if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) + ioat_dma->cap &= + ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); + + /* dca is incompatible with raid operations */ + if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) + ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); + + if (ioat_dma->cap & IOAT_CAP_XOR) { + is_raid_device = true; + dma->max_xor = 8; + + dma_cap_set(DMA_XOR, dma->cap_mask); + dma->device_prep_dma_xor = ioat_prep_xor; + + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = ioat_prep_xor_val; + } + + if (ioat_dma->cap & IOAT_CAP_PQ) { + is_raid_device = true; + + dma->device_prep_dma_pq = ioat_prep_pq; + dma->device_prep_dma_pq_val = ioat_prep_pq_val; + dma_cap_set(DMA_PQ, dma->cap_mask); + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); + + if (ioat_dma->cap & IOAT_CAP_RAID16SS) + dma_set_maxpq(dma, 16, 0); + else + dma_set_maxpq(dma, 8, 0); + + if (!(ioat_dma->cap & IOAT_CAP_XOR)) { + dma->device_prep_dma_xor = ioat_prep_pqxor; + dma->device_prep_dma_xor_val = ioat_prep_pqxor_val; + dma_cap_set(DMA_XOR, dma->cap_mask); + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + + if (ioat_dma->cap & IOAT_CAP_RAID16SS) + dma->max_xor = 16; + else + dma->max_xor = 8; + } + } + + dma->device_tx_status = ioat_tx_status; + + /* starting with CB3.3 super extended descriptors are supported */ + if (ioat_dma->cap & IOAT_CAP_RAID16SS) { + char pool_name[14]; + int i; + + for (i = 0; i < MAX_SED_POOLS; i++) { + snprintf(pool_name, 14, "ioat_hw%d_sed", i); + + /* allocate SED DMA pool */ + ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, + &pdev->dev, + SED_SIZE * (i + 1), 64, 0); + if (!ioat_dma->sed_hw_pool[i]) + return -ENOMEM; + + } + } + + if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + + err = ioat_probe(ioat_dma); + if (err) + return err; + + list_for_each_entry(c, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(c); + writel(IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(ioat_dma); + if (err) + return err; + + ioat_kobject_add(ioat_dma, &ioat_ktype); + + if (dca) + ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); + + return 0; +} + +#define DRV_NAME "ioatdma" + +static struct pci_driver ioat_pci_driver = { + .name = DRV_NAME, + .id_table = ioat_pci_tbl, + .probe = ioat_pci_probe, + .remove = ioat_remove, +}; + +static struct ioatdma_device * +alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) +{ + struct device *dev = &pdev->dev; + struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + + if (!d) + return NULL; + d->pdev = pdev; + d->reg_base = iobase; + return d; +} + +static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev = &pdev->dev; + struct ioatdma_device *device; + int err; + + err = pcim_enable_device(pdev); + if 
(err) + return err; + + err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); + if (err) + return err; + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + + device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); + if (!device) + return -ENOMEM; + pci_set_master(pdev); + pci_set_drvdata(pdev, device); + + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version >= IOAT_VER_3_0) + err = ioat3_dma_probe(device, ioat_dca_enabled); + else + return -ENODEV; + + if (err) { + dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); + return -ENODEV; + } + + return 0; +} + +static void ioat_remove(struct pci_dev *pdev) +{ + struct ioatdma_device *device = pci_get_drvdata(pdev); + + if (!device) + return; + + dev_err(&pdev->dev, "Removing dma and dca services\n"); + if (device->dca) { + unregister_dca_provider(device->dca, &pdev->dev); + free_dca_provider(device->dca); + device->dca = NULL; + } + ioat_dma_remove(device); +} + +static int __init ioat_init_module(void) +{ + int err = -ENOMEM; + + pr_info("%s: Intel(R) QuickData Technology Driver %s\n", + DRV_NAME, IOAT_DMA_VERSION); + + ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!ioat_cache) + return -ENOMEM; + + ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); + if (!ioat_sed_cache) + goto err_ioat_cache; + + err = pci_register_driver(&ioat_pci_driver); + if (err) + goto err_ioat3_cache; + + return 0; + + err_ioat3_cache: + kmem_cache_destroy(ioat_sed_cache); + + err_ioat_cache: + kmem_cache_destroy(ioat_cache); + + return err; +} +module_init(ioat_init_module); + +static void __exit ioat_exit_module(void) +{ + pci_unregister_driver(&ioat_pci_driver); + kmem_cache_destroy(ioat_cache); +} +module_exit(ioat_exit_module); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c deleted file mode 100644 index 76f0dc688..000000000 --- a/drivers/dma/ioat/pci.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2007 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -/* - * This driver supports an Intel I/OAT DMA engine, which does asynchronous - * copy operations. 
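/*
 * Standalone sketch (not part of the patch): ioat_init_module() above uses
 * the classic goto-unwind pattern -- each failure point releases, in reverse
 * order, only what was set up before it.  Modeled here with stub resources
 * in plain C; names and return codes are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

static void *create_cache(const char *name) { (void)name; return malloc(1); }
static void destroy_cache(void *cache) { free(cache); }
static int register_driver(void) { return -1; /* force the unwind path */ }

static int init_sketch(void)
{
	void *ring_cache, *sed_cache;
	int err = -1;

	ring_cache = create_cache("ioat");	/* like ioat_cache */
	if (!ring_cache)
		return err;

	sed_cache = create_cache("ioat_sed");	/* like ioat_sed_cache */
	if (!sed_cache)
		goto err_ring_cache;

	err = register_driver();		/* like pci_register_driver() */
	if (err)
		goto err_sed_cache;

	return 0;

	/* unwind in reverse order of setup */
 err_sed_cache:
	destroy_cache(sed_cache);
 err_ring_cache:
	destroy_cache(ring_cache);
	return err;
}

int main(void)
{
	printf("init: %d\n", init_sketch());	/* prints -1 here */
	return 0;
}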
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include <linux/slab.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
-	/* I/OAT v1 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-	{ PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
-	/* I/OAT v2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
-	/* I/OAT v3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-
-	/* I/OAT v3.2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
-
-	/* I/OAT v3.3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
-	{
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, - - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, - - { 0, } -}; -MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); - -static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -static void ioat_remove(struct pci_dev *pdev); - -static int ioat_dca_enabled = 1; -module_param(ioat_dca_enabled, int, 0644); -MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); - -struct kmem_cache *ioat2_cache; -struct kmem_cache *ioat3_sed_cache; - -#define DRV_NAME "ioatdma" - -static struct pci_driver ioat_pci_driver = { - .name = DRV_NAME, - .id_table = ioat_pci_tbl, - .probe = ioat_pci_probe, - .remove = ioat_remove, -}; - -static struct ioatdma_device * -alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) -{ - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); - - if (!d) - return NULL; - d->pdev = pdev; - d->reg_base = iobase; - return d; -} - -static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - void __iomem * const *iomap; - struct device *dev = &pdev->dev; - struct ioatdma_device *device; - int err; - - err = pcim_enable_device(pdev); - if (err) - return err; - - err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); - if (err) - return err; - iomap = pcim_iomap_table(pdev); - if (!iomap) - return -ENOMEM; - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - return err; - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - return err; - - device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); - if (!device) - return -ENOMEM; - pci_set_master(pdev); - pci_set_drvdata(pdev, device); - - device->version = readb(device->reg_base + IOAT_VER_OFFSET); - if (device->version == IOAT_VER_1_2) - err = ioat1_dma_probe(device, ioat_dca_enabled); - else if (device->version == IOAT_VER_2_0) - err = ioat2_dma_probe(device, ioat_dca_enabled); - else if (device->version >= IOAT_VER_3_0) - err = ioat3_dma_probe(device, ioat_dca_enabled); - else - return -ENODEV; - - if (err) { - dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); - return -ENODEV; - } - - return 0; -} - -static void ioat_remove(struct pci_dev *pdev) -{ - struct ioatdma_device *device = pci_get_drvdata(pdev); - - if (!device) - return; - - dev_err(&pdev->dev, "Removing dma and dca services\n"); - if (device->dca) { - unregister_dca_provider(device->dca, &pdev->dev); - free_dca_provider(device->dca); - device->dca = NULL; - } - ioat_dma_remove(device); -} - -static int __init ioat_init_module(void) -{ - int err = -ENOMEM; - - pr_info("%s: Intel(R) QuickData Technology Driver %s\n", - DRV_NAME, IOAT_DMA_VERSION); - - ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!ioat2_cache) - return -ENOMEM; - - ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); - if (!ioat3_sed_cache) - goto err_ioat2_cache; - - err = pci_register_driver(&ioat_pci_driver); - if (err) - goto err_ioat3_cache; - - return 0; - - err_ioat3_cache: - 
	kmem_cache_destroy(ioat3_sed_cache);
-
- err_ioat2_cache:
-	kmem_cache_destroy(ioat2_cache);
-
-	return err;
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-	pci_unregister_driver(&ioat_pci_driver);
-	kmem_cache_destroy(ioat2_cache);
-}
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 000000000..ad4fb41cd
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,715 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+
+#define MAX_SCF	1024
+
+/* provide a lookup table for setting the source address in the base or
+ * extended descriptor of an xor or pq descriptor
+ */
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+				       2, 2, 2, 2, 2, 2, 2 };
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+					0, 1, 2, 3, 4, 5, 6 };
+
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+			dma_addr_t addr, u32 offset, int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+	raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	return raw->field[pq_idx_to_field[idx]];
+}
+
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	return raw->field[pq16_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+		       dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	raw->field[pq_idx_to_field[idx]] = addr + offset;
+	pq->coef[idx] = coef;
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+			 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+	struct ioat_pq16a_descriptor *pq16 =
+		(struct ioat_pq16a_descriptor *)desc[1];
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+	if (idx < 8)
+		pq->coef[idx] = coef;
+	else
+		pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
+{
+	struct ioat_sed_ent *sed;
+	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+	sed = kmem_cache_alloc(ioat_sed_cache, flags);
+	if (!sed)
+		return NULL;
+
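/*
 * Standalone sketch (not part of the patch): the xor lookup tables in
 * prep.c above pack up to eight source addresses into a base descriptor
 * (five address slots) plus an optional extended descriptor.  Bit idx of
 * xor_idx_to_desc selects the descriptor, xor_idx_to_field[idx] the 64-bit
 * slot inside it.  The routing can be checked in plain C:
 */
#include <stdio.h>
#include <stdint.h>

static const uint8_t xor_idx_to_desc = 0xe0;	/* 0b11100000 */
static const uint8_t xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };

int main(void)
{
	int idx;

	/* sources 0..4 land in the base descriptor, 5..7 in the ext one */
	for (idx = 0; idx < 8; idx++)
		printf("src[%d] -> %s descriptor, field %d\n", idx,
		       ((xor_idx_to_desc >> idx) & 1) ? "extended" : "base",
		       xor_idx_to_field[idx]);
	return 0;
}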
+ sed->hw_pool = hw_pool; + sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], + flags, &sed->dma); + if (!sed->hw) { + kmem_cache_free(ioat_sed_cache, sed); + return NULL; + } + + return sed; +} + +struct dma_async_tx_descriptor * +ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + dma_addr_t dst = dma_dest; + dma_addr_t src = dma_src; + size_t total_len = len; + int num_descs, idx, i; + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + hw = desc->hw; + + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + dump_desc_dbg(ioat_chan, desc); + } while (++i < num_descs); + + desc->txd.flags = flags; + desc->len = total_len; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat_chan, desc); + /* we leave the channel locked to ensure in order submission */ + + return &desc->txd; +} + + +static struct dma_async_tx_descriptor * +__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, + dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex = NULL; + struct ioat_dma_descriptor *hw; + int num_descs, with_ext, idx, i; + u32 offset = 0; + u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; + + BUG_ON(src_cnt < 2); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + /* we need 2x the number of descriptors to cover greater than 5 + * sources + */ + if (src_cnt > 5) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
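/*
 * Standalone sketch (not part of the patch): ioat_dma_prep_memcpy_lock()
 * above slices one copy into ring descriptors of at most
 * 1 << xfercap_log bytes each.  The arithmetic, with an assumed 1 MiB
 * transfer cap (real hardware reports xfercap in a register):
 */
#include <stdio.h>
#include <stddef.h>

#define XFERCAP_LOG 20	/* assumed: 1 MiB per descriptor */

static size_t xferlen_to_descs(size_t len)
{
	/* DIV_ROUND_UP(len, 1 << XFERCAP_LOG), as the driver computes it */
	return (len + (1UL << XFERCAP_LOG) - 1) >> XFERCAP_LOG;
}

int main(void)
{
	size_t len = (5UL << 20) + 123;	/* 5 MiB and change -> 6 descs */
	size_t offset = 0;

	printf("%zu descriptors\n", xferlen_to_descs(len));
	while (len) {
		size_t copy = len < (1UL << XFERCAP_LOG) ?
			      len : (1UL << XFERCAP_LOG);

		printf("desc at offset %zu, size %zu\n", offset, copy);
		len -= copy;
		offset += copy;
	}
	return 0;
}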
+ */ + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs+1) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, + len, 1 << ioat_chan->xfercap_log); + int s; + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + xor = desc->xor; + + /* save a branch by unconditionally retrieving the + * extended descriptor xor_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat_get_ring_ent(ioat_chan, idx + i + 1); + xor_ex = ext->xor_ex; + + descs[0] = (struct ioat_raw_descriptor *) xor; + descs[1] = (struct ioat_raw_descriptor *) xor_ex; + for (s = 0; s < src_cnt; s++) + xor_set_src(descs, src[s], offset, s); + xor->size = xfer_size; + xor->dst_addr = dest + offset; + xor->ctl = 0; + xor->ctl_f.op = op; + xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); + + len -= xfer_size; + offset += xfer_size; + dump_desc_dbg(ioat_chan, desc); + } while ((i += 1 + with_ext) < num_descs); + + /* last xor descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* completion descriptor carries interrupt bit */ + compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat_chan, compl_desc); + + /* we leave the channel locked to ensure in order submission */ + return &compl_desc->txd; +} + +struct dma_async_tx_descriptor * +ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *result = 0; + + return __ioat_prep_xor_lock(chan, result, src[0], &src[1], + src_cnt - 1, len, flags); +} + +static void +dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc, + struct ioat_ring_ent *ext) +{ + struct device *dev = to_dev(ioat_chan); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; + struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; + int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) (pq_ex ? pq_ex->next : pq->next), + desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, + pq->ctl_f.int_en, pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? 
"" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq_get_src(descs, i), pq->coef[i]); + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); + dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); +} + +static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, + struct ioat_ring_ent *desc) +{ + struct device *dev = to_dev(ioat_chan); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_raw_descriptor *descs[] = { (void *)pq, + (void *)pq, + (void *)pq }; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + if (desc->sed) { + descs[1] = (void *)desc->sed->hw; + descs[2] = (void *)desc->sed->hw + 64; + } + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) pq->next, + desc->txd.flags, pq->size, pq->ctl, + pq->ctl_f.op, pq->ctl_f.int_en, + pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) { + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq16_get_src(descs, i), + pq->coef[i]); + } + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); +} + +static struct dma_async_tx_descriptor * +__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex = NULL; + struct ioat_dma_descriptor *hw; + u32 offset = 0; + u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; + int i, s, idx, with_ext, num_descs; + int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0; + + dev_dbg(to_dev(ioat_chan), "%s\n", __func__); + /* the engine requires at least two sources (we provide + * at least 1 implied source in the DMA_PREP_CONTINUE case) + */ + BUG_ON(src_cnt + dmaf_continue(flags) < 2); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + /* we need 2x the number of descriptors to cover greater than 3 + * sources (we need 1 extra source in the q-only continuation + * case and 3 extra sources in the p+q continuation case. + */ + if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || + (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
+ */ + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, len, + 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + pq = desc->pq; + + /* save a branch by unconditionally retrieving the + * extended descriptor pq_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext); + pq_ex = ext->pq_ex; + + descs[0] = (struct ioat_raw_descriptor *) pq; + descs[1] = (struct ioat_raw_descriptor *) pq_ex; + + for (s = 0; s < src_cnt; s++) + pq_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq_set_src(descs, dst[0], offset, 0, s++); + pq_set_src(descs, dst[1], offset, 1, s++); + pq_set_src(descs, dst[1], offset, 0, s++); + } + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + /* we turn on descriptor write back error status */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.src_cnt = src_cnt_to_hw(s); + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while ((i += 1 + with_ext) < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + dump_pq_desc_dbg(ioat_chan, desc, ext); + + if (!cb32) { + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + compl_desc = desc; + } else { + /* completion descriptor carries interrupt bit */ + compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat_chan, compl_desc); + } + + + /* we leave the channel locked to ensure in order submission */ + return &compl_desc->txd; +} + +static struct dma_async_tx_descriptor * +__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + u32 offset = 0; + u8 op; + int i, s, idx, num_descs; + + /* this function is only called with 9-16 sources */ + op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; + + dev_dbg(to_dev(ioat_chan), "%s\n", __func__); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + + /* + * 16 source pq is only available on cb3.3 and has no completion + * write hw bug. 
+ */ + if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0) + idx = ioat_chan->head; + else + return NULL; + + i = 0; + + do { + struct ioat_raw_descriptor *descs[4]; + size_t xfer_size = min_t(size_t, len, + 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + pq = desc->pq; + + descs[0] = (struct ioat_raw_descriptor *) pq; + + desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); + if (!desc->sed) { + dev_err(to_dev(ioat_chan), + "%s: no free sed entries\n", __func__); + return NULL; + } + + pq->sed_addr = desc->sed->dma; + desc->sed->parent = desc; + + descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; + descs[2] = (void *)descs[1] + 64; + + for (s = 0; s < src_cnt; s++) + pq16_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq16_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq16_set_src(descs, dst[0], offset, 0, s++); + pq16_set_src(descs, dst[1], offset, 1, s++); + pq16_set_src(descs, dst[1], offset, 0, s++); + } + + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + pq->ctl_f.src_cnt = src16_cnt_to_hw(s); + /* we turn on descriptor write back error status */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while (++i < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* with cb3.3 we should be able to do completion w/o a null desc */ + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + + dump_pq16_desc_dbg(ioat_chan, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + +struct dma_async_tx_descriptor * +ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags) +{ + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + dst[0] = dst[1]; + if (flags & DMA_PREP_PQ_DISABLE_Q) + dst[1] = dst[0]; + + /* handle the single source multiply case from the raid6 + * recovery path + */ + if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { + dma_addr_t single_source[2]; + unsigned char single_source_coef[2]; + + BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); + single_source[0] = src[0]; + single_source[1] = src[0]; + single_source_coef[0] = scf[0]; + single_source_coef[1] = 0; + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, NULL, dst, single_source, + 2, single_source_coef, len, + flags) : + __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2, + single_source_coef, len, flags); + + } else { + return src_cnt_flags(src_cnt, flags) > 8 ? 
+ __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags) : + __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags); + } +} + +struct dma_async_tx_descriptor * +ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags) +{ + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + pq[0] = pq[1]; + if (flags & DMA_PREP_PQ_DISABLE_Q) + pq[1] = pq[0]; + + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *pqres = 0; + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags) : + __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + unsigned char scf[MAX_SCF]; + dma_addr_t pq[2]; + + if (src_cnt > MAX_SCF) + return NULL; + + memset(scf, 0, src_cnt); + pq[0] = dst; + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = dst; /* specify valid address for disabled result */ + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags) : + __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + unsigned char scf[MAX_SCF]; + dma_addr_t pq[2]; + + if (src_cnt > MAX_SCF) + return NULL; + + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *result = 0; + + memset(scf, 0, src_cnt); + pq[0] = src[0]; + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = pq[0]; /* specify valid address for disabled result */ + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags) : + __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; + + if (ioat_check_space_lock(ioat_chan, 1) == 0) + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); + else + return NULL; + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + + desc->txd.flags = flags; + desc->len = 1; + + dump_desc_dbg(ioat_chan, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c new file mode 100644 index 000000000..cb4a857ee --- /dev/null +++ b/drivers/dma/ioat/sysfs.c @@ -0,0 +1,135 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
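/*
 * Standalone sketch (not part of the patch): ioat_prep_pqxor() above can
 * service plain XOR because the pq engine's P result is the byte-wise XOR
 * of all sources regardless of the Galois coefficients, so the scf array
 * is zeroed and Q is disabled via DMA_PREP_PQ_DISABLE_Q.  The same parity
 * computed in plain C:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NSRC 3
#define LEN  8

int main(void)
{
	static const uint8_t src[NSRC][LEN] = {
		{ 1, 2, 3, 4, 5, 6, 7, 8 },
		{ 8, 7, 6, 5, 4, 3, 2, 1 },
		{ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 },
	};
	uint8_t p[LEN];
	int i, j;

	memset(p, 0, sizeof(p));
	for (i = 0; i < NSRC; i++)
		for (j = 0; j < LEN; j++)
			p[j] ^= src[i][j];	/* P = D0 ^ D1 ^ ... ^ Dn */

	for (j = 0; j < LEN; j++)
		printf("%02x ", p[j]);
	printf("\n");
	return 0;
}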
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+
+	return sprintf(page, "copy%s%s%s%s%s\n",
+		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
+
+	return sprintf(page, "%d.%d\n",
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct ioat_sysfs_entry *entry;
+	struct ioatdma_chan *ioat_chan;
+
+	entry = container_of(attr, struct ioat_sysfs_entry, attr);
+	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+	if (!entry->show)
+		return -EIO;
+	return entry->show(&ioat_chan->dma_chan, page);
+}
+
+const struct sysfs_ops ioat_sysfs_ops = {
+	.show = ioat_attr_show,
+};
+
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+		struct kobject *parent = &c->dev->device.kobj;
+		int err;
+
+		err = kobject_init_and_add(&ioat_chan->kobj, type,
+					   parent, "quickdata");
+		if (err) {
+			dev_warn(to_dev(ioat_chan),
+				 "sysfs init error (%d), continuing...\n", err);
+			kobject_put(&ioat_chan->kobj);
+			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
+		}
+	}
+}
+
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
+			kobject_del(&ioat_chan->kobj);
+			kobject_put(&ioat_chan->kobj);
+		}
+	}
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	/* ...taken outside the lock, no need to be precise */
+	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+	&ring_size_attr.attr,
+	&ring_active_attr.attr,
+	&ioat_cap_attr.attr,
+	&ioat_version_attr.attr,
+	NULL,
+};
+
+struct kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+
.default_attrs = ioat_attrs, +}; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 998826854..e4f43125e 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1300,10 +1300,11 @@ static int iop_adma_probe(struct platform_device *pdev) * note: writecombine gives slightly better performance, but * requires that we explicitly flush the writes */ - if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, - plat_data->pool_size, - &adev->dma_desc_pool, - GFP_KERNEL)) == NULL) { + adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, + plat_data->pool_size, + &adev->dma_desc_pool, + GFP_KERNEL); + if (!adev->dma_desc_pool_virt) { ret = -ENOMEM; goto err_free_adev; } diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 2e284a443..2bf37e68a 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -265,10 +265,10 @@ int ipu_irq_unmap(unsigned int source) return ret; } -/* Chained IRQ handler for IPU error interrupt */ -static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) +/* Chained IRQ handler for IPU function and error interrupt */ +static void ipu_irq_handler(struct irq_desc *desc) { - struct ipu *ipu = irq_get_handler_data(irq); + struct ipu *ipu = irq_desc_get_handler_data(desc); u32 status; int i, line; @@ -286,43 +286,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) raw_spin_unlock(&bank_lock); while ((line = ffs(status))) { struct ipu_irq_map *map; - - line--; - status &= ~(1UL << line); - - raw_spin_lock(&bank_lock); - map = src2map(32 * i + line); - if (map) - irq = map->irq; - raw_spin_unlock(&bank_lock); - - if (!map) { - pr_err("IPU: Interrupt on unmapped source %u bank %d\n", - line, i); - continue; - } - generic_handle_irq(irq); - } - } -} - -/* Chained IRQ handler for IPU function interrupt */ -static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) -{ - struct ipu *ipu = irq_desc_get_handler_data(desc); - u32 status; - int i, line; - - for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { - struct ipu_irq_bank *bank = irq_bank + i; - - raw_spin_lock(&bank_lock); - status = ipu_read_reg(ipu, bank->status); - /* Not clearing all interrupts, see above */ - status &= ipu_read_reg(ipu, bank->control); - raw_spin_unlock(&bank_lock); - while ((line = ffs(status))) { - struct ipu_irq_map *map; + unsigned int irq = NO_IRQ; line--; status &= ~(1UL << line); @@ -377,16 +341,12 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) irq_map[i].irq = irq; irq_map[i].source = -EINVAL; irq_set_handler(irq, handle_level_irq); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); -#endif + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } - irq_set_handler_data(ipu->irq_fn, ipu); - irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn); + irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu); - irq_set_handler_data(ipu->irq_err, ipu); - irq_set_chained_handler(ipu->irq_err, ipu_irq_err); + irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu); ipu->irq_base = irq_base; @@ -399,16 +359,12 @@ void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) irq_base = ipu->irq_base; - irq_set_chained_handler(ipu->irq_fn, NULL); - irq_set_handler_data(ipu->irq_fn, NULL); + irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL); - irq_set_chained_handler(ipu->irq_err, NULL); - irq_set_handler_data(ipu->irq_err, NULL); + irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL); for (irq = irq_base; irq < irq_base 
+ CONFIG_MX3_IPU_IRQS; irq++) { -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif + irq_set_status_flags(irq, IRQ_NOREQUEST); irq_set_chip(irq, NULL); irq_set_chip_data(irq, NULL); } diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 647e362f0..1ba2fd738 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -24,7 +24,6 @@ #include "virt-dma.h" #define DRIVER_NAME "k3-dma" -#define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1ffc #define INT_STAT 0x00 @@ -732,7 +731,7 @@ static int k3_dma_probe(struct platform_device *op) d->slave.device_pause = k3_dma_transfer_pause; d->slave.device_resume = k3_dma_transfer_resume; d->slave.device_terminate_all = k3_dma_terminate_all; - d->slave.copy_align = DMA_ALIGN; + d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; /* init virtual channel */ d->chans = devm_kzalloc(&op->dev, diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c new file mode 100644 index 000000000..761f32687 --- /dev/null +++ b/drivers/dma/lpc18xx-dmamux.c @@ -0,0 +1,183 @@ +/* + * DMA Router driver for LPC18xx/43xx DMA MUX + * + * Copyright (C) 2015 Joachim Eastwood + * + * Based on TI DMA Crossbar driver by: + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/* CREG register offset and macros for mux manipulation */ +#define LPC18XX_CREG_DMAMUX 0x11c +#define LPC18XX_DMAMUX_VAL(v, n) ((v) << (n * 2)) +#define LPC18XX_DMAMUX_MASK(n) (0x3 << (n * 2)) +#define LPC18XX_DMAMUX_MAX_VAL 0x3 + +struct lpc18xx_dmamux { + u32 value; + bool busy; +}; + +struct lpc18xx_dmamux_data { + struct dma_router dmarouter; + struct lpc18xx_dmamux *muxes; + u32 dma_master_requests; + u32 dma_mux_requests; + struct regmap *reg; + spinlock_t lock; +}; + +static void lpc18xx_dmamux_free(struct device *dev, void *route_data) +{ + struct lpc18xx_dmamux_data *dmamux = dev_get_drvdata(dev); + struct lpc18xx_dmamux *mux = route_data; + unsigned long flags; + + spin_lock_irqsave(&dmamux->lock, flags); + mux->busy = false; + spin_unlock_irqrestore(&dmamux->lock, flags); +} + +static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev); + unsigned long flags; + unsigned mux; + + if (dma_spec->args_count != 3) { + dev_err(&pdev->dev, "invalid number of dma mux args\n"); + return ERR_PTR(-EINVAL); + } + + mux = dma_spec->args[0]; + if (mux >= dmamux->dma_master_requests) { + dev_err(&pdev->dev, "invalid mux number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) { + dev_err(&pdev->dev, "invalid dma mux value: %d\n", + dma_spec->args[1]); + return ERR_PTR(-EINVAL); + } + + /* The of_node_put() will be done in the core for the node */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "can't get dma master\n"); + return ERR_PTR(-EINVAL); + } + + spin_lock_irqsave(&dmamux->lock, flags); + if (dmamux->muxes[mux].busy) { + spin_unlock_irqrestore(&dmamux->lock, flags); + dev_err(&pdev->dev, "dma request %u busy with %u.%u\n", + mux, mux, dmamux->muxes[mux].value); + 
of_node_put(dma_spec->np); + return ERR_PTR(-EBUSY); + } + + dmamux->muxes[mux].busy = true; + dmamux->muxes[mux].value = dma_spec->args[1]; + + regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX, + LPC18XX_DMAMUX_MASK(mux), + LPC18XX_DMAMUX_VAL(dmamux->muxes[mux].value, mux)); + spin_unlock_irqrestore(&dmamux->lock, flags); + + dma_spec->args[1] = dma_spec->args[2]; + dma_spec->args_count = 2; + + dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux, + dmamux->muxes[mux].value, mux); + + return &dmamux->muxes[mux]; +} + +static int lpc18xx_dmamux_probe(struct platform_device *pdev) +{ + struct device_node *dma_np, *np = pdev->dev.of_node; + struct lpc18xx_dmamux_data *dmamux; + int ret; + + dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); + if (!dmamux) + return -ENOMEM; + + dmamux->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); + if (IS_ERR(dmamux->reg)) { + dev_err(&pdev->dev, "syscon lookup failed\n"); + return PTR_ERR(dmamux->reg); + } + + ret = of_property_read_u32(np, "dma-requests", + &dmamux->dma_mux_requests); + if (ret) { + dev_err(&pdev->dev, "missing dma-requests property\n"); + return ret; + } + + dma_np = of_parse_phandle(np, "dma-masters", 0); + if (!dma_np) { + dev_err(&pdev->dev, "can't get dma master\n"); + return -ENODEV; + } + + ret = of_property_read_u32(dma_np, "dma-requests", + &dmamux->dma_master_requests); + of_node_put(dma_np); + if (ret) { + dev_err(&pdev->dev, "missing master dma-requests property\n"); + return ret; + } + + dmamux->muxes = devm_kcalloc(&pdev->dev, dmamux->dma_master_requests, + sizeof(struct lpc18xx_dmamux), + GFP_KERNEL); + if (!dmamux->muxes) + return -ENOMEM; + + spin_lock_init(&dmamux->lock); + platform_set_drvdata(pdev, dmamux); + dmamux->dmarouter.dev = &pdev->dev; + dmamux->dmarouter.route_free = lpc18xx_dmamux_free; + + return of_dma_router_register(np, lpc18xx_dmamux_reserve, + &dmamux->dmarouter); +} + +static const struct of_device_id lpc18xx_dmamux_match[] = { + { .compatible = "nxp,lpc1850-dmamux" }, + {}, +}; + +static struct platform_driver lpc18xx_dmamux_driver = { + .probe = lpc18xx_dmamux_probe, + .driver = { + .name = "lpc18xx-dmamux", + .of_match_table = lpc18xx_dmamux_match, + }, +}; + +static int __init lpc18xx_dmamux_init(void) +{ + return platform_driver_register(&lpc18xx_dmamux_driver); +} +arch_initcall(lpc18xx_dmamux_init); diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h index f663b0bdd..d89982034 100644 --- a/drivers/dma/mic_x100_dma.h +++ b/drivers/dma/mic_x100_dma.h @@ -39,7 +39,7 @@ */ #define MIC_DMA_MAX_NUM_CHAN 8 #define MIC_DMA_NUM_CHAN 4 -#define MIC_DMA_ALIGN_SHIFT 6 +#define MIC_DMA_ALIGN_SHIFT DMAENGINE_ALIGN_64_BYTES #define MIC_DMA_ALIGN_BYTES (1 << MIC_DMA_ALIGN_SHIFT) #define MIC_DMA_DESC_RX_SIZE (128 * 1024 - 4) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 462a0229a..e39457f13 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -72,7 +72,6 @@ #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ -#define PDMA_ALIGNMENT 3 #define PDMA_MAX_DESC_BYTES DCMD_LENGTH struct mmp_pdma_desc_hw { @@ -1071,7 +1070,7 @@ static int mmp_pdma_probe(struct platform_device *op) pdev->device.device_issue_pending = mmp_pdma_issue_pending; pdev->device.device_config = mmp_pdma_config; pdev->device.device_terminate_all = mmp_pdma_terminate_all; - pdev->device.copy_align = PDMA_ALIGNMENT; + pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; 
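/*
 * Standalone sketch (not part of the patch): the hunks above replace raw
 * copy_align numbers with DMAENGINE_ALIGN_* names.  The value is the log2
 * of the required alignment (3 -> 8 bytes, 2 -> 4 bytes, 6 -> 64 bytes,
 * matching the conversions in this patch), and a client checks src, dst
 * and length against it:
 */
#include <stdio.h>
#include <stdint.h>

enum { ALIGN_4_BYTES = 2, ALIGN_8_BYTES = 3, ALIGN_64_BYTES = 6 };

/* modeled after dmaengine's alignment check */
static int copy_is_aligned(int align, uintptr_t src, uintptr_t dst,
			   size_t len)
{
	uintptr_t mask = ((uintptr_t)1 << align) - 1;

	return !((src | dst | (uintptr_t)len) & mask);
}

int main(void)
{
	printf("%d\n", copy_is_aligned(ALIGN_8_BYTES, 0x1000, 0x2000, 64));
	printf("%d\n", copy_is_aligned(ALIGN_8_BYTES, 0x1004, 0x2000, 64));
	return 0;
}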
pdev->device.src_addr_widths = widths; pdev->device.dst_addr_widths = widths; pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index e683761e0..3df042260 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -100,7 +100,6 @@ enum mmp_tdma_type { PXA910_SQU, }; -#define TDMA_ALIGNMENT 3 #define TDMA_MAX_XFER_BYTES SZ_64K struct mmp_tdma_chan { @@ -695,7 +694,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) tdev->device.device_pause = mmp_tdma_pause_chan; tdev->device.device_resume = mmp_tdma_resume_chan; tdev->device.device_terminate_all = mmp_tdma_terminate_all; - tdev->device.copy_align = TDMA_ALIGNMENT; + tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); platform_set_drvdata(pdev, tdev); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index f1325f625..1c2de9a83 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -13,7 +13,6 @@ */ #include -#include #include #include #include @@ -26,6 +25,7 @@ #include #include #include +#include #include #include "dmaengine.h" @@ -1126,7 +1126,8 @@ static const struct of_device_id mv_xor_dt_ids[] = { { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, {}, }; -MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); + +static unsigned int mv_xor_engine_count; static int mv_xor_probe(struct platform_device *pdev) { @@ -1134,6 +1135,7 @@ static int mv_xor_probe(struct platform_device *pdev) struct mv_xor_device *xordev; struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); struct resource *res; + unsigned int max_engines, max_channels; int i, ret; int op_in_desc; @@ -1177,6 +1179,21 @@ static int mv_xor_probe(struct platform_device *pdev) if (!IS_ERR(xordev->clk)) clk_prepare_enable(xordev->clk); + /* + * We don't want to have more than one channel per CPU in + * order for async_tx to perform well. So we limit the number + * of engines and channels so that we take into account this + * constraint. Note that we also want to use channels from + * separate engines when possible. 
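/*
 * Standalone sketch (not part of the patch): the mv_xor comment above caps
 * XOR channels at one per CPU, preferring channels from separate engines.
 * With the per-engine channel count assumed to be 2 (MV_XOR_MAX_CHANNELS
 * in mv_xor.h), the probe math works out as:
 */
#include <stdio.h>

#define MV_XOR_MAX_CHANNELS 2	/* assumed value */

int main(void)
{
	unsigned int cpus;

	for (cpus = 1; cpus <= 8; cpus *= 2) {
		unsigned int max_engines = cpus;
		/* min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(cpus, 2)) */
		unsigned int per_engine = (cpus + 1) / 2;

		if (per_engine > MV_XOR_MAX_CHANNELS)
			per_engine = MV_XOR_MAX_CHANNELS;
		printf("%u CPUs -> up to %u engines, %u channel(s) each\n",
		       cpus, max_engines, per_engine);
	}
	return 0;
}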
+ */ + max_engines = num_present_cpus(); + max_channels = min_t(unsigned int, + MV_XOR_MAX_CHANNELS, + DIV_ROUND_UP(num_present_cpus(), 2)); + + if (mv_xor_engine_count >= max_engines) + return 0; + if (pdev->dev.of_node) { struct device_node *np; int i = 0; @@ -1190,13 +1207,13 @@ static int mv_xor_probe(struct platform_device *pdev) int irq; op_in_desc = (int)of_id->data; + if (i >= max_channels) + continue; + dma_cap_zero(cap_mask); - if (of_property_read_bool(np, "dmacap,memcpy")) - dma_cap_set(DMA_MEMCPY, cap_mask); - if (of_property_read_bool(np, "dmacap,xor")) - dma_cap_set(DMA_XOR, cap_mask); - if (of_property_read_bool(np, "dmacap,interrupt")) - dma_cap_set(DMA_INTERRUPT, cap_mask); + dma_cap_set(DMA_MEMCPY, cap_mask); + dma_cap_set(DMA_XOR, cap_mask); + dma_cap_set(DMA_INTERRUPT, cap_mask); irq = irq_of_parse_and_map(np, 0); if (!irq) { @@ -1216,7 +1233,7 @@ static int mv_xor_probe(struct platform_device *pdev) i++; } } else if (pdata && pdata->channels) { - for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { + for (i = 0; i < max_channels; i++) { struct mv_xor_channel_data *cd; struct mv_xor_chan *chan; int irq; @@ -1263,27 +1280,8 @@ err_channel_add: return ret; } -static int mv_xor_remove(struct platform_device *pdev) -{ - struct mv_xor_device *xordev = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { - if (xordev->channels[i]) - mv_xor_channel_remove(xordev->channels[i]); - } - - if (!IS_ERR(xordev->clk)) { - clk_disable_unprepare(xordev->clk); - clk_put(xordev->clk); - } - - return 0; -} - static struct platform_driver mv_xor_driver = { .probe = mv_xor_probe, - .remove = mv_xor_remove, .driver = { .name = MV_XOR_NAME, .of_match_table = of_match_ptr(mv_xor_dt_ids), @@ -1295,19 +1293,10 @@ static int __init mv_xor_init(void) { return platform_driver_register(&mv_xor_driver); } -module_init(mv_xor_init); - -/* it's currently unsafe to unload this module */ -#if 0 -static void __exit mv_xor_exit(void) -{ - platform_driver_unregister(&mv_xor_driver); - return; -} - -module_exit(mv_xor_exit); -#endif +device_initcall(mv_xor_init); +/* MODULE_AUTHOR("Saeed Bishara "); MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); MODULE_LICENSE("GPL"); +*/ diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index b859792dd..113605f6f 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index ecab4ea05..17ee758b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1198,6 +1198,9 @@ static inline int _loop(unsigned dry_run, u8 buf[], unsigned lcnt0, lcnt1, ljmp0, ljmp1; struct _arg_LPEND lpend; + if (*bursts == 1) + return _bursts(dry_run, buf, pxs, 1); + /* Max iterations possible in DMALP is 256 */ if (*bursts >= 256*256) { lcnt1 = 256; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 95bdbbe2a..fc4156afa 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -184,19 +184,18 @@ static unsigned int pxad_drcmr(unsigned int line) static int dbg_show_requester_chan(struct seq_file *s, void *p) { - int pos = 0; struct pxad_phy *phy = s->private; int i; u32 drcmr; - pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx); + seq_printf(s, "DMA channel %d requester :\n", phy->idx); for (i = 0; i < 70; i++) { drcmr = readl_relaxed(phy->base + pxad_drcmr(i)); if ((drcmr & DRCMR_CHLNUM) == phy->idx) - pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i, - !!(drcmr & DRCMR_MAPVLD)); + seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i, + !!(drcmr & DRCMR_MAPVLD)); } - return pos; + return 0; } static inline int dbg_burst_from_dcmd(u32 dcmd) @@ -474,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -517,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -907,21 +910,25 @@ static void pxad_get_config(struct pxad_chan *chan, enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; *dcmd = 0; - if (chan->cfg.direction == DMA_DEV_TO_MEM) { + if (dir == DMA_DEV_TO_MEM) { maxburst = chan->cfg.src_maxburst; width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } - if (chan->cfg.direction == DMA_MEM_TO_DEV) { + if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } - if (chan->cfg.direction == DMA_MEM_TO_MEM) + if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | PXA_DCMD_INCSRCADDR; @@ -1179,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* + * curr has to be actually read before checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. 
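/*
 * Standalone sketch (not part of the patch): the rmb() added to
 * pxad_residue() above orders the read of the current-transfer register
 * before the descriptor-completion check.  A userspace analogue with C11
 * atomics, an acquire fence playing the role of rmb():
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t hw_curr;	/* models the DTADR/DSADR read */
static _Atomic int desc_done;		/* models is_desc_completed(vd) */

static uint32_t sample_residue(void)
{
	/* read curr first... */
	uint32_t curr = atomic_load_explicit(&hw_curr, memory_order_relaxed);

	/* ...then fence, so the completion check cannot be hoisted above
	 * the curr load; seeing "done" here thus implies curr was sampled
	 * no earlier than inside the status-updater descriptor */
	atomic_thread_fence(memory_order_acquire);

	if (atomic_load_explicit(&desc_done, memory_order_relaxed))
		return 0;	/* transfer finished, no residue */
	return curr;
}

int main(void)
{
	atomic_store(&hw_curr, 0x1000);
	printf("curr = %#x\n", (unsigned int)sample_residue());
	return 0;
}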
+ */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 0f371524a..9fda65af8 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -39,18 +39,6 @@ config SH_DMAE_R8A73A4 endif -config SUDMAC - tristate "Renesas SUDMAC support" - depends on SH_DMAE_BASE - help - Enable support for the Renesas SUDMAC controllers. - -config RCAR_HPB_DMAE - tristate "Renesas R-Car HPB DMAC support" - depends on SH_DMAE_BASE - help - Enable support for the Renesas R-Car series DMA controllers. - config RCAR_DMAC tristate "Renesas R-Car Gen2 DMA Controller" depends on ARCH_SHMOBILE || COMPILE_TEST @@ -59,6 +47,12 @@ config RCAR_DMAC This driver supports the general purpose DMA controller found in the Renesas R-Car second generation SoCs. +config RCAR_HPB_DMAE + tristate "Renesas R-Car HPB DMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas R-Car series DMA controllers. + config RENESAS_USB_DMAC tristate "Renesas USB-DMA Controller" depends on ARCH_SHMOBILE || COMPILE_TEST @@ -67,3 +61,9 @@ config RENESAS_USB_DMAC help This driver supports the USB-DMA controller found in the Renesas SoCs. + +config SUDMAC + tristate "Renesas SUDMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas SUDMAC controllers. diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index b8a598066..0133e4658 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -13,7 +13,7 @@ shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o shdma-objs := $(shdma-y) obj-$(CONFIG_SH_DMAE) += shdma.o -obj-$(CONFIG_SUDMAC) += sudmac.o -obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o +obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o +obj-$(CONFIG_SUDMAC) += sudmac.o diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 8c5186cc9..7d5598d87 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -455,6 +455,7 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan) switch (sdma->type) { case SIRFSOC_DMA_VER_A7V1: writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT); writel_relaxed((1 << cid) | 1 << (cid + 16), sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); @@ -462,6 +463,8 @@ static int sirfsoc_dma_terminate_all(struct dma_chan *chan) break; case SIRFSOC_DMA_VER_A7V2: writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7); + writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, + sdma->base + SIRFSOC_DMA_INT_ATLAS7); writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7); break; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 3c10f034d..750d1b313 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2853,7 +2853,7 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) * This controller can only access address at even * 32bit boundaries, i.e. 
2^2 */ - dev->copy_align = 2; + dev->copy_align = DMAENGINE_ALIGN_4_BYTES; } if (dma_has_cap(DMA_SG, dev->cap_mask)) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c new file mode 100644 index 000000000..1661d5182 --- /dev/null +++ b/drivers/dma/sun4i-dma.c @@ -0,0 +1,1288 @@ +/* + * Copyright (C) 2014 Emilio López + * Emilio López + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +/** Common macros to normal and dedicated DMA registers **/ + +#define SUN4I_DMA_CFG_LOADING BIT(31) +#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 25) +#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len) ((len) << 23) +#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode) ((mode) << 21) +#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type) ((type) << 16) +#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 9) +#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len) ((len) << 7) +#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5) +#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type) + +/** Normal DMA register values **/ + +/* Normal DMA source/destination data request type values */ +#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16 +#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1) + +/** Normal DMA register layout **/ + +/* Dedicated DMA source/destination address mode values */ +#define SUN4I_NDMA_ADDR_MODE_LINEAR 0 +#define SUN4I_NDMA_ADDR_MODE_IO 1 + +/* Normal DMA configuration register layout */ +#define SUN4I_NDMA_CFG_CONT_MODE BIT(30) +#define SUN4I_NDMA_CFG_WAIT_STATE(n) ((n) << 27) +#define SUN4I_NDMA_CFG_DST_NON_SECURE BIT(22) +#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15) +#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6) + +/** Dedicated DMA register values **/ + +/* Dedicated DMA source/destination address mode values */ +#define SUN4I_DDMA_ADDR_MODE_LINEAR 0 +#define SUN4I_DDMA_ADDR_MODE_IO 1 +#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE 2 +#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE 3 + +/* Dedicated DMA source/destination data request type values */ +#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1 +#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1) + +/** Dedicated DMA register layout **/ + +/* Dedicated DMA configuration register layout */ +#define SUN4I_DDMA_CFG_BUSY BIT(30) +#define SUN4I_DDMA_CFG_CONT_MODE BIT(29) +#define SUN4I_DDMA_CFG_DST_NON_SECURE BIT(28) +#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15) +#define SUN4I_DDMA_CFG_SRC_NON_SECURE BIT(12) + +/* Dedicated DMA parameter register layout */ +#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n) (((n) - 1) << 24) +#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n) (((n) - 1) << 16) +#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n) (((n) - 1) << 8) +#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n) (((n) - 1) << 0) + +/** DMA register offsets **/ + +/* General register offsets */ +#define SUN4I_DMA_IRQ_ENABLE_REG 0x0 +#define SUN4I_DMA_IRQ_PENDING_STATUS_REG 0x4 + +/* Normal DMA register offsets */ +#define SUN4I_NDMA_CHANNEL_REG_BASE(n) (0x100 + (n) * 0x20) +#define SUN4I_NDMA_CFG_REG 0x0 +#define SUN4I_NDMA_SRC_ADDR_REG 0x4 +#define SUN4I_NDMA_DST_ADDR_REG 0x8 +#define SUN4I_NDMA_BYTE_COUNT_REG 0xC + +/* Dedicated DMA register offsets */ +#define SUN4I_DDMA_CHANNEL_REG_BASE(n) (0x300 + (n) * 0x20) +#define SUN4I_DDMA_CFG_REG 0x0 +#define 
SUN4I_DDMA_SRC_ADDR_REG 0x4 +#define SUN4I_DDMA_DST_ADDR_REG 0x8 +#define SUN4I_DDMA_BYTE_COUNT_REG 0xC +#define SUN4I_DDMA_PARA_REG 0x18 + +/** DMA Driver **/ + +/* + * Normal DMA has 8 channels, and Dedicated DMA has another 8, so + * that's 16 channels. As for endpoints, there are 29 and 21 + * respectively. Given that the Normal DMA endpoints (other than + * SDRAM) can be used as tx/rx, we need 78 vchans in total + */ +#define SUN4I_NDMA_NR_MAX_CHANNELS 8 +#define SUN4I_DDMA_NR_MAX_CHANNELS 8 +#define SUN4I_DMA_NR_MAX_CHANNELS \ + (SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS) +#define SUN4I_NDMA_NR_MAX_VCHANS (29 * 2 - 1) +#define SUN4I_DDMA_NR_MAX_VCHANS 21 +#define SUN4I_DMA_NR_MAX_VCHANS \ + (SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS) + +/* This set of SUN4I_DDMA timing parameters was found experimentally while + * working with the SPI driver and seems to make it behave correctly */ +#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \ + (SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) | \ + SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) | \ + SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \ + SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2)) + +struct sun4i_dma_pchan { + /* Register base of channel */ + void __iomem *base; + /* vchan currently being serviced */ + struct sun4i_dma_vchan *vchan; + /* Is this a dedicated pchan? */ + int is_dedicated; +}; + +struct sun4i_dma_vchan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct sun4i_dma_pchan *pchan; + struct sun4i_dma_promise *processing; + struct sun4i_dma_contract *contract; + u8 endpoint; + int is_dedicated; +}; + +struct sun4i_dma_promise { + u32 cfg; + u32 para; + dma_addr_t src; + dma_addr_t dst; + size_t len; + struct list_head list; +}; + +/* A contract is a set of promises */ +struct sun4i_dma_contract { + struct virt_dma_desc vd; + struct list_head demands; + struct list_head completed_demands; + int is_cyclic; +}; + +struct sun4i_dma_dev { + DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS); + struct dma_device slave; + struct sun4i_dma_pchan *pchans; + struct sun4i_dma_vchan *vchans; + void __iomem *base; + struct clk *clk; + int irq; + spinlock_t lock; +}; + +static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev) +{ + return container_of(dev, struct sun4i_dma_dev, slave); +} + +static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan) +{ + return container_of(chan, struct sun4i_dma_vchan, vc.chan); +} + +static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd) +{ + return container_of(vd, struct sun4i_dma_contract, vd); +} + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static int convert_burst(u32 maxburst) +{ + if (maxburst > 8) + return -EINVAL; + + /* 1 -> 0, 4 -> 1, 8 -> 2 */ + return (maxburst >> 2); +} + +static int convert_buswidth(enum dma_slave_buswidth addr_width) +{ + if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) + return -EINVAL; + + /* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */ + return (addr_width >> 1); +} + +static void sun4i_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + + vchan_free_chan_resources(&vchan->vc); +} + +static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv, + struct sun4i_dma_vchan *vchan) +{ + struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans; + unsigned long flags; + int i, max; + + /* + * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and + * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated
ones + */ + if (vchan->is_dedicated) { + i = SUN4I_NDMA_NR_MAX_CHANNELS; + max = SUN4I_DMA_NR_MAX_CHANNELS; + } else { + i = 0; + max = SUN4I_NDMA_NR_MAX_CHANNELS; + } + + spin_lock_irqsave(&priv->lock, flags); + for_each_clear_bit_from(i, &priv->pchans_used, max) { + pchan = &pchans[i]; + pchan->vchan = vchan; + set_bit(i, priv->pchans_used); + break; + } + spin_unlock_irqrestore(&priv->lock, flags); + + return pchan; +} + +static void release_pchan(struct sun4i_dma_dev *priv, + struct sun4i_dma_pchan *pchan) +{ + unsigned long flags; + int nr = pchan - priv->pchans; + + spin_lock_irqsave(&priv->lock, flags); + + pchan->vchan = NULL; + clear_bit(nr, priv->pchans_used); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void configure_pchan(struct sun4i_dma_pchan *pchan, + struct sun4i_dma_promise *d) +{ + /* + * Configure addresses and misc parameters depending on type + * SUN4I_DDMA has an extra field with timing parameters + */ + if (pchan->is_dedicated) { + writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG); + writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG); + writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG); + writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG); + writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG); + } else { + writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG); + writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG); + writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG); + writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG); + } +} + +static void set_pchan_interrupt(struct sun4i_dma_dev *priv, + struct sun4i_dma_pchan *pchan, + int half, int end) +{ + u32 reg; + int pchan_number = pchan - priv->pchans; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG); + + if (half) + reg |= BIT(pchan_number * 2); + else + reg &= ~BIT(pchan_number * 2); + + if (end) + reg |= BIT(pchan_number * 2 + 1); + else + reg &= ~BIT(pchan_number * 2 + 1); + + writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/** + * Execute pending operations on a vchan + * + * When given a vchan, this function will try to acquire a suitable + * pchan and, if successful, will configure it to fulfill a promise + * from the next pending contract. + * + * This function must be called with &vchan->vc.lock held. 
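For reference, the pchan claiming done by find_and_use_pchan() above reduces to a first-fit scan of a shared bitmap, restricted to the half that matches the channel type. A minimal userspace sketch of that idea, using plain masks instead of the kernel bitmap helpers; NDMA_CH/DDMA_CH stand in for the driver constants:

	#include <stdio.h>

	#define NDMA_CH 8
	#define DDMA_CH 8
	#define ALL_CH  (NDMA_CH + DDMA_CH)

	static unsigned int pchans_used;	/* bit n set => pchan n busy */

	static int claim_pchan(int dedicated)
	{
		int i   = dedicated ? NDMA_CH : 0;
		int max = dedicated ? ALL_CH  : NDMA_CH;

		/* in the driver this scan runs under priv->lock */
		for (; i < max; i++) {
			if (!(pchans_used & (1u << i))) {
				pchans_used |= 1u << i;	/* set_bit() */
				return i;
			}
		}
		return -1;	/* -EBUSY: all pchans of this type in use */
	}

	int main(void)
	{
		printf("normal pchan: %d\n", claim_pchan(0));	/* 0 */
		printf("dedicated pchan: %d\n", claim_pchan(1));	/* 8 */
		return 0;
	}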
+ */ +static int __execute_vchan_pending(struct sun4i_dma_dev *priv, + struct sun4i_dma_vchan *vchan) +{ + struct sun4i_dma_promise *promise = NULL; + struct sun4i_dma_contract *contract = NULL; + struct sun4i_dma_pchan *pchan; + struct virt_dma_desc *vd; + int ret; + + lockdep_assert_held(&vchan->vc.lock); + + /* We need a pchan to do anything, so secure one if available */ + pchan = find_and_use_pchan(priv, vchan); + if (!pchan) + return -EBUSY; + + /* + * Channel endpoints must not be repeated, so if this vchan + * has already submitted some work, we can't do anything else + */ + if (vchan->processing) { + dev_dbg(chan2dev(&vchan->vc.chan), + "processing something to this endpoint already\n"); + ret = -EBUSY; + goto release_pchan; + } + + do { + /* Figure out which contract we're working with today */ + vd = vchan_next_desc(&vchan->vc); + if (!vd) { + dev_dbg(chan2dev(&vchan->vc.chan), + "No pending contract found"); + ret = 0; + goto release_pchan; + } + + contract = to_sun4i_dma_contract(vd); + if (list_empty(&contract->demands)) { + /* The contract has been completed so mark it as such */ + list_del(&contract->vd.node); + vchan_cookie_complete(&contract->vd); + dev_dbg(chan2dev(&vchan->vc.chan), + "Empty contract found and marked complete"); + } + } while (list_empty(&contract->demands)); + + /* Now find out what we need to do */ + promise = list_first_entry(&contract->demands, + struct sun4i_dma_promise, list); + vchan->processing = promise; + + /* ... and make it reality */ + if (promise) { + vchan->contract = contract; + vchan->pchan = pchan; + set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1); + configure_pchan(pchan, promise); + } + + return 0; + +release_pchan: + release_pchan(priv, pchan); + return ret; +} + +static int sanitize_config(struct dma_slave_config *sconfig, + enum dma_transfer_direction direction) +{ + switch (direction) { + case DMA_MEM_TO_DEV: + if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) || + !sconfig->dst_maxburst) + return -EINVAL; + + if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + sconfig->src_addr_width = sconfig->dst_addr_width; + + if (!sconfig->src_maxburst) + sconfig->src_maxburst = sconfig->dst_maxburst; + + break; + + case DMA_DEV_TO_MEM: + if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) || + !sconfig->src_maxburst) + return -EINVAL; + + if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + sconfig->dst_addr_width = sconfig->src_addr_width; + + if (!sconfig->dst_maxburst) + sconfig->dst_maxburst = sconfig->src_maxburst; + + break; + default: + return 0; + } + + return 0; +} + +/** + * Generate a promise, to be used in a normal DMA contract. + * + * A NDMA promise contains all the information required to program the + * normal part of the DMA Engine and get data copied. A non-executed + * promise will live in the demands list on a contract. Once it has been + * completed, it will be moved to the completed demands list for later freeing. 
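Before generating a normal promise, the driver runs sanitize_config() (shown above) on the slave configuration. It encodes one rule: the device-side width and burst are mandatory, and the memory side inherits them when left unset. Restated as a stand-alone sketch, with the struct trimmed to the four fields the rule touches and 0 playing the role of DMA_SLAVE_BUSWIDTH_UNDEFINED:

	#include <stdio.h>

	struct cfg {
		int src_width, dst_width;	/* 0 == undefined */
		int src_burst, dst_burst;	/* 0 == unset */
	};

	static int sanitize(struct cfg *c, int mem_to_dev)
	{
		if (mem_to_dev) {
			if (!c->dst_width || !c->dst_burst)
				return -1;	/* -EINVAL: device side is mandatory */
			if (!c->src_width) c->src_width = c->dst_width;
			if (!c->src_burst) c->src_burst = c->dst_burst;
		} else {
			if (!c->src_width || !c->src_burst)
				return -1;
			if (!c->dst_width) c->dst_width = c->src_width;
			if (!c->dst_burst) c->dst_burst = c->src_burst;
		}
		return 0;
	}

	int main(void)
	{
		struct cfg c = { .dst_width = 4, .dst_burst = 8 };

		if (!sanitize(&c, 1))
			printf("src side defaulted to width %d burst %d\n",
			       c.src_width, c.src_burst);
		return 0;
	}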
+ * All linked promises will be freed when the corresponding contract is freed + */ +static struct sun4i_dma_promise * +generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest, + size_t len, struct dma_slave_config *sconfig, + enum dma_transfer_direction direction) +{ + struct sun4i_dma_promise *promise; + int ret; + + ret = sanitize_config(sconfig, direction); + if (ret) + return NULL; + + promise = kzalloc(sizeof(*promise), GFP_NOWAIT); + if (!promise) + return NULL; + + promise->src = src; + promise->dst = dest; + promise->len = len; + promise->cfg = SUN4I_DMA_CFG_LOADING | + SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN; + + dev_dbg(chan2dev(chan), + "src burst %d, dst burst %d, src buswidth %d, dst buswidth %d", + sconfig->src_maxburst, sconfig->dst_maxburst, + sconfig->src_addr_width, sconfig->dst_addr_width); + + /* Source burst */ + ret = convert_burst(sconfig->src_maxburst); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); + + /* Destination burst */ + ret = convert_burst(sconfig->dst_maxburst); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); + + /* Source bus width */ + ret = convert_buswidth(sconfig->src_addr_width); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); + + /* Destination bus width */ + ret = convert_buswidth(sconfig->dst_addr_width); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); + + return promise; + +fail: + kfree(promise); + return NULL; +} + +/** + * Generate a promise, to be used in a dedicated DMA contract. + * + * A DDMA promise contains all the information required to program the + * Dedicated part of the DMA Engine and get data copied. A non-executed + * promise will live in the demands list on a contract. Once it has been + * completed, it will be moved to the completed demands list for later freeing. + * All linked promises will be freed when the corresponding contract is freed + */ +static struct sun4i_dma_promise * +generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest, + size_t len, struct dma_slave_config *sconfig) +{ + struct sun4i_dma_promise *promise; + int ret; + + promise = kzalloc(sizeof(*promise), GFP_NOWAIT); + if (!promise) + return NULL; + + promise->src = src; + promise->dst = dest; + promise->len = len; + promise->cfg = SUN4I_DMA_CFG_LOADING | + SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN; + + /* Source burst */ + ret = convert_burst(sconfig->src_maxburst); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret); + + /* Destination burst */ + ret = convert_burst(sconfig->dst_maxburst); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret); + + /* Source bus width */ + ret = convert_buswidth(sconfig->src_addr_width); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret); + + /* Destination bus width */ + ret = convert_buswidth(sconfig->dst_addr_width); + if (IS_ERR_VALUE(ret)) + goto fail; + promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret); + + return promise; + +fail: + kfree(promise); + return NULL; +} + +/** + * Generate a contract + * + * Contracts function as DMA descriptors. As our hardware does not support + * linked lists, we need to implement SG via software. We use a contract + * to hold all the pieces of the request and process them serially one + * after another. Each piece is represented as a promise. 
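Both promise generators lean on the same two encoding helpers, convert_burst() and convert_buswidth(), plus the shift macros from the top of the file. A quick stand-alone check of that arithmetic (expected output: src cfg bits: 0x500):

	#include <stdint.h>
	#include <stdio.h>

	static int convert_burst(uint32_t maxburst)
	{
		if (maxburst > 8)
			return -1;		/* -EINVAL in the driver */
		return maxburst >> 2;		/* 1 -> 0, 4 -> 1, 8 -> 2 */
	}

	static int convert_buswidth(uint32_t bytes)
	{
		if (bytes > 4)
			return -1;
		return bytes >> 1;		/* 1 -> 0, 2 -> 1, 4 -> 2 */
	}

	int main(void)
	{
		uint32_t cfg = 0;

		cfg |= (uint32_t)convert_burst(8)    << 7;	/* SRC_BURST_LENGTH */
		cfg |= (uint32_t)convert_buswidth(4) << 9;	/* SRC_DATA_WIDTH */
		printf("src cfg bits: %#x\n", cfg);
		return 0;
	}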
+ */ +static struct sun4i_dma_contract *generate_dma_contract(void) +{ + struct sun4i_dma_contract *contract; + + contract = kzalloc(sizeof(*contract), GFP_NOWAIT); + if (!contract) + return NULL; + + INIT_LIST_HEAD(&contract->demands); + INIT_LIST_HEAD(&contract->completed_demands); + + return contract; +} + +/** + * Get next promise on a cyclic transfer + * + * Cyclic contracts contain a series of promises which are executed on a + * loop. This function returns the next promise from a cyclic contract, + * so it can be programmed into the hardware. + */ +static struct sun4i_dma_promise * +get_next_cyclic_promise(struct sun4i_dma_contract *contract) +{ + struct sun4i_dma_promise *promise; + + promise = list_first_entry_or_null(&contract->demands, + struct sun4i_dma_promise, list); + if (!promise) { + list_splice_init(&contract->completed_demands, + &contract->demands); + promise = list_first_entry(&contract->demands, + struct sun4i_dma_promise, list); + } + + return promise; +} + +/** + * Free a contract and all its associated promises + */ +static void sun4i_dma_free_contract(struct virt_dma_desc *vd) +{ + struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); + struct sun4i_dma_promise *promise, *tmp; + + /* Free all the demands and completed demands */ + list_for_each_entry_safe(promise, tmp, &contract->demands, list) + kfree(promise); + + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) + kfree(promise); + + kfree(contract); +} + +static struct dma_async_tx_descriptor * +sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun4i_dma_promise *promise; + struct sun4i_dma_contract *contract; + + contract = generate_dma_contract(); + if (!contract) + return NULL; + + /* + * We can only do the copy to bus aligned addresses, so + * choose the best one so we get decent performance. We also + * maximize the burst size for this same reason. 
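get_next_cyclic_promise() above keeps a cyclic transfer running without reallocating anything: promises drain from the demands list into completed_demands, and when demands runs dry the completed list is spliced back. A compact sketch of that recycling, collapsing the interrupt handler's list_move_tail() and the fetch into one function for brevity, with singly linked lists in place of list_head:

	#include <stdio.h>

	struct promise { int id; struct promise *next; };

	static struct promise pool[3] = { {0, &pool[1]}, {1, &pool[2]}, {2, NULL} };
	static struct promise *demands = &pool[0];
	static struct promise *done_head, *done_tail;

	static struct promise *next_cyclic_promise(void)
	{
		struct promise *p;

		if (!demands) {			/* list_splice_init(&completed, &demands) */
			demands = done_head;
			done_head = done_tail = NULL;
		}

		p = demands;			/* list_first_entry(&demands, ...) */
		demands = p->next;

		p->next = NULL;			/* tail insertion keeps lap order */
		if (done_tail)
			done_tail->next = p;
		else
			done_head = p;
		done_tail = p;

		return p;
	}

	int main(void)
	{
		for (int i = 0; i < 7; i++)
			printf("%d ", next_cyclic_promise()->id);
		printf("\n");			/* 0 1 2 0 1 2 0 */
		return 0;
	}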
+ */ + sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + sconfig->src_maxburst = 8; + sconfig->dst_maxburst = 8; + + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, src, dest, len, sconfig); + else + promise = generate_ndma_promise(chan, src, dest, len, sconfig, + DMA_MEM_TO_MEM); + + if (!promise) { + kfree(contract); + return NULL; + } + + /* Configure memcpy mode */ + if (vchan->is_dedicated) { + promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) | + SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM); + } else { + promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | + SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + } + + /* Fill the contract with our only promise */ + list_add_tail(&promise->list, &contract->demands); + + /* And add it to the vchan */ + return vchan_tx_prep(&vchan->vc, &contract->vd, flags); +} + +static struct dma_async_tx_descriptor * +sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun4i_dma_promise *promise; + struct sun4i_dma_contract *contract; + dma_addr_t src, dest; + u32 endpoints; + int nr_periods, offset, plength, i; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "Invalid DMA direction\n"); + return NULL; + } + + if (vchan->is_dedicated) { + /* + * As we are using this just for audio data, we need to use + * normal DMA. There is nothing stopping us from supporting + * dedicated DMA here as well, so if a client comes up and + * requires it, it will be simple to implement it. + */ + dev_err(chan2dev(chan), + "Cyclic transfers are only supported on Normal DMA\n"); + return NULL; + } + + contract = generate_dma_contract(); + if (!contract) + return NULL; + + contract->is_cyclic = 1; + + /* Figure out the endpoints and the address we need */ + if (dir == DMA_MEM_TO_DEV) { + src = buf; + dest = sconfig->dst_addr; + endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | + SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO); + } else { + src = sconfig->src_addr; + dest = buf; + endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) | + SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + } + + /* + * We will be using half done interrupts to make two periods + * out of a promise, so we need to program the DMA engine less + * often + */ + + /* + * The engine can interrupt on half-transfer, so we can use + * this feature to program the engine half as often as if we + * didn't use it (keep in mind the hardware doesn't support + * linked lists). + * + * Say you have a set of periods (| marks the start/end, I for + * interrupt, P for programming the engine to do a new + * transfer), the easy but slow way would be to do + * + * |---|---|---|---| (periods / promises) + * P I,P I,P I,P I + * + * Using half transfer interrupts you can do + * + * |-------|-------| (promises as configured on hw) + * |---|---|---|---| (periods) + * P I I,P I I + * + * Which requires half the engine programming for the same + * functionality. 
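The loop that follows implements this packing; here is the same arithmetic in isolation, for a five-period buffer (the buffer and period sizes are example values):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define MIN(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned int len = 5 * 4096, period_len = 4096;	/* 5 periods */
		unsigned int nr_promises = DIV_ROUND_UP(len / period_len, 2);

		for (unsigned int i = 0; i < nr_promises; i++) {
			unsigned int offset  = i * period_len * 2;
			unsigned int plength = MIN(len - offset, period_len * 2);

			printf("promise %u: offset %#x, length %u (%s)\n",
			       i, offset, plength,
			       plength == period_len * 2 ? "two periods" : "one period");
		}
		return 0;
	}

An odd period count simply leaves the last promise half-sized, which is exactly what the min() in the driver's loop produces.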
+ */ + nr_periods = DIV_ROUND_UP(len / period_len, 2); + for (i = 0; i < nr_periods; i++) { + /* Calculate the offset in the buffer and the length needed */ + offset = i * period_len * 2; + plength = min((len - offset), (period_len * 2)); + if (dir == DMA_MEM_TO_DEV) + src = buf + offset; + else + dest = buf + offset; + + /* Make the promise */ + promise = generate_ndma_promise(chan, src, dest, + plength, sconfig, dir); + if (!promise) { + /* TODO: should we free everything? */ + return NULL; + } + promise->cfg |= endpoints; + + /* Then add it to the contract */ + list_add_tail(&promise->list, &contract->demands); + } + + /* And add it to the vchan */ + return vchan_tx_prep(&vchan->vc, &contract->vd, flags); +} + +static struct dma_async_tx_descriptor * +sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun4i_dma_promise *promise; + struct sun4i_dma_contract *contract; + u8 ram_type, io_mode, linear_mode; + struct scatterlist *sg; + dma_addr_t srcaddr, dstaddr; + u32 endpoints, para; + int i; + + if (!sgl) + return NULL; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "Invalid DMA direction\n"); + return NULL; + } + + contract = generate_dma_contract(); + if (!contract) + return NULL; + + if (vchan->is_dedicated) { + io_mode = SUN4I_DDMA_ADDR_MODE_IO; + linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM; + } else { + io_mode = SUN4I_NDMA_ADDR_MODE_IO; + linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM; + } + + if (dir == DMA_MEM_TO_DEV) + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode); + else + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode); + + for_each_sg(sgl, sg, sg_len, i) { + /* Figure out addresses */ + if (dir == DMA_MEM_TO_DEV) { + srcaddr = sg_dma_address(sg); + dstaddr = sconfig->dst_addr; + } else { + srcaddr = sconfig->src_addr; + dstaddr = sg_dma_address(sg); + } + + /* + * These are the magic DMA engine timings that keep SPI going. + * I haven't seen any interface on DMAEngine to configure + * timings, and so far they seem to work for everything we + * support, so I've kept them here. I don't know if other + * devices need different timings because, as usual, we only + * have the "para" bitfield meanings, but no comment on what + * the values should be when doing a certain operation :| + */ + para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS; + + /* And make a suitable promise */ + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, srcaddr, dstaddr, + sg_dma_len(sg), + sconfig); + else + promise = generate_ndma_promise(chan, srcaddr, dstaddr, + sg_dma_len(sg), + sconfig, dir); + + if (!promise) + return NULL; /* TODO: should we free everything? 
*/ + + promise->cfg |= endpoints; + promise->para = para; + + /* Then add it to the contract */ + list_add_tail(&promise->list, &contract->demands); + } + + /* + * Once we've got all the promises ready, add the contract + * to the pending list on the vchan + */ + return vchan_tx_prep(&vchan->vc, &contract->vd, flags); +} + +static int sun4i_dma_terminate_all(struct dma_chan *chan) +{ + struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device); + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + struct sun4i_dma_pchan *pchan = vchan->pchan; + LIST_HEAD(head); + unsigned long flags; + + spin_lock_irqsave(&vchan->vc.lock, flags); + vchan_get_all_descriptors(&vchan->vc, &head); + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + /* + * Clearing the configuration register will halt the pchan. Interrupts + * may still trigger, so don't forget to disable them. + */ + if (pchan) { + if (pchan->is_dedicated) + writel(0, pchan->base + SUN4I_DDMA_CFG_REG); + else + writel(0, pchan->base + SUN4I_NDMA_CFG_REG); + set_pchan_interrupt(priv, pchan, 0, 0); + release_pchan(priv, pchan); + } + + spin_lock_irqsave(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + /* Clear these so the vchan is usable again */ + vchan->processing = NULL; + vchan->pchan = NULL; + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + return 0; +} + +static int sun4i_dma_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + + memcpy(&vchan->cfg, config, sizeof(*config)); + + return 0; +} + +static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct sun4i_dma_dev *priv = ofdma->of_dma_data; + struct sun4i_dma_vchan *vchan; + struct dma_chan *chan; + u8 is_dedicated = dma_spec->args[0]; + u8 endpoint = dma_spec->args[1]; + + /* Check if type is Normal or Dedicated */ + if (is_dedicated != 0 && is_dedicated != 1) + return NULL; + + /* Make sure the endpoint looks sane */ + if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) || + (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT)) + return NULL; + + chan = dma_get_any_slave_channel(&priv->slave); + if (!chan) + return NULL; + + /* Assign the endpoint to the vchan */ + vchan = to_sun4i_dma_vchan(chan); + vchan->is_dedicated = is_dedicated; + vchan->endpoint = endpoint; + + return chan; +} + +static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + struct sun4i_dma_pchan *pchan = vchan->pchan; + struct sun4i_dma_contract *contract; + struct sun4i_dma_promise *promise; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + size_t bytes = 0; + + ret = dma_cookie_status(chan, cookie, state); + if (!state || (ret == DMA_COMPLETE)) + return ret; + + spin_lock_irqsave(&vchan->vc.lock, flags); + vd = vchan_find_desc(&vchan->vc, cookie); + if (!vd) + goto exit; + contract = to_sun4i_dma_contract(vd); + + list_for_each_entry(promise, &contract->demands, list) + bytes += promise->len; + + /* + * The hardware is configured to return the remaining byte + * quantity. 
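Concretely, the residue bookkeeping works out as in this sketch (all values invented): total the lengths of the still-pending promises, then swap the in-flight promise's full size for the byte count the engine reports.

	#include <stdio.h>

	int main(void)
	{
		unsigned int pending[] = { 4096, 4096, 4096 };	/* demand list */
		unsigned int hw_byte_count = 1024;	/* BYTE_COUNT_REG: bytes left */
		unsigned int bytes = 0;

		for (unsigned int i = 0; i < 3; i++)
			bytes += pending[i];

		/* the first listed promise is the one in flight on the pchan */
		bytes -= pending[0];
		bytes += hw_byte_count;

		printf("residue: %u bytes\n", bytes);	/* 9216 */
		return 0;
	}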
If possible, replace the first listed element's + * full size with the actual remaining amount + */ + promise = list_first_entry_or_null(&contract->demands, + struct sun4i_dma_promise, list); + if (promise && pchan) { + bytes -= promise->len; + if (pchan->is_dedicated) + bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG); + else + bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG); + } + +exit: + + dma_set_residue(state, bytes); + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + return ret; +} + +static void sun4i_dma_issue_pending(struct dma_chan *chan) +{ + struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device); + struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan); + unsigned long flags; + + spin_lock_irqsave(&vchan->vc.lock, flags); + + /* + * If there are pending transactions for this vchan, push one of + * them into the engine to get the ball rolling. + */ + if (vchan_issue_pending(&vchan->vc)) + __execute_vchan_pending(priv, vchan); + + spin_unlock_irqrestore(&vchan->vc.lock, flags); +} + +static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id) +{ + struct sun4i_dma_dev *priv = dev_id; + struct sun4i_dma_pchan *pchans = priv->pchans, *pchan; + struct sun4i_dma_vchan *vchan; + struct sun4i_dma_contract *contract; + struct sun4i_dma_promise *promise; + unsigned long pendirq, irqs, disableirqs; + int bit, i, free_room, allow_mitigation = 1; + + pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG); + +handle_pending: + + disableirqs = 0; + free_room = 0; + + for_each_set_bit(bit, &pendirq, 32) { + pchan = &pchans[bit >> 1]; + vchan = pchan->vchan; + if (!vchan) /* a terminated channel may still interrupt */ + continue; + contract = vchan->contract; + + /* + * Disable the IRQ and free the pchan if it's an end + * interrupt (odd bit) + */ + if (bit & 1) { + spin_lock(&vchan->vc.lock); + + /* + * Move the promise into the completed list now that + * we're done with it + */ + list_del(&vchan->processing->list); + list_add_tail(&vchan->processing->list, + &contract->completed_demands); + + /* + * Cyclic DMA transfers are special: + * - There's always something we can dispatch + * - We need to run the callback + * - Latency is very important, as this is used by audio + * We therefore just cycle through the list and dispatch + * whatever we have here, reusing the pchan. There's + * no need to run the thread after this. + * + * For non-cyclic transfers we need to look around, + * so we can program some more work, or notify the + * client that their transfers have been completed. 
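The dispatch below decodes the pending-status register exactly as set_pchan_interrupt() encodes it: pchan n owns bit 2n for half-done and bit 2n+1 for end-of-transfer. A stand-alone decode of an example status word:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pendirq = (1u << 5) | (1u << 8);	/* example status */

		for (int bit = 0; bit < 32; bit++) {
			if (!(pendirq & (1ul << bit)))
				continue;
			printf("pchan %d: %s interrupt\n",
			       bit >> 1, (bit & 1) ? "end" : "half-done");
		}
		return 0;
	}

This prints "pchan 2: end interrupt" and "pchan 4: half-done interrupt"; the driver walks the same bits with for_each_set_bit().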
+ */ + if (contract->is_cyclic) { + promise = get_next_cyclic_promise(contract); + vchan->processing = promise; + configure_pchan(pchan, promise); + vchan_cyclic_callback(&contract->vd); + } else { + vchan->processing = NULL; + vchan->pchan = NULL; + + free_room = 1; + disableirqs |= BIT(bit); + release_pchan(priv, pchan); + } + + spin_unlock(&vchan->vc.lock); + } else { + /* Half done interrupt */ + if (contract->is_cyclic) + vchan_cyclic_callback(&contract->vd); + else + disableirqs |= BIT(bit); + } + } + + /* Disable the IRQs for events we handled */ + spin_lock(&priv->lock); + irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG); + writel_relaxed(irqs & ~disableirqs, + priv->base + SUN4I_DMA_IRQ_ENABLE_REG); + spin_unlock(&priv->lock); + + /* Writing 1 to the pending field will clear the pending interrupt */ + writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG); + + /* + * If a pchan was freed, we may be able to schedule something else, + * so have a look around + */ + if (free_room) { + for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) { + vchan = &priv->vchans[i]; + spin_lock(&vchan->vc.lock); + __execute_vchan_pending(priv, vchan); + spin_unlock(&vchan->vc.lock); + } + } + + /* + * Handle newer interrupts if some showed up, but only do it once + * to avoid too long a loop + */ + if (allow_mitigation) { + pendirq = readl_relaxed(priv->base + + SUN4I_DMA_IRQ_PENDING_STATUS_REG); + if (pendirq) { + allow_mitigation = 0; + goto handle_pending; + } + } + + return IRQ_HANDLED; +} + +static int sun4i_dma_probe(struct platform_device *pdev) +{ + struct sun4i_dma_dev *priv; + struct resource *res; + int i, j, ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + return priv->irq; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "No clock specified\n"); + return PTR_ERR(priv->clk); + } + + platform_set_drvdata(pdev, priv); + spin_lock_init(&priv->lock); + + dma_cap_zero(priv->slave.cap_mask); + dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask); + dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask); + dma_cap_set(DMA_SLAVE, priv->slave.cap_mask); + + INIT_LIST_HEAD(&priv->slave.channels); + priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources; + priv->slave.device_tx_status = sun4i_dma_tx_status; + priv->slave.device_issue_pending = sun4i_dma_issue_pending; + priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg; + priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy; + priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic; + priv->slave.device_config = sun4i_dma_config; + priv->slave.device_terminate_all = sun4i_dma_terminate_all; + priv->slave.copy_align = 2; + priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + priv->slave.directions = BIT(DMA_DEV_TO_MEM) | + BIT(DMA_MEM_TO_DEV); + priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + priv->slave.dev = &pdev->dev; + + priv->pchans =
devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS, + sizeof(struct sun4i_dma_pchan), GFP_KERNEL); + priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS, + sizeof(struct sun4i_dma_vchan), GFP_KERNEL); + if (!priv->vchans || !priv->pchans) + return -ENOMEM; + + /* + * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and + * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are + * dedicated ones + */ + for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++) + priv->pchans[i].base = priv->base + + SUN4I_NDMA_CHANNEL_REG_BASE(i); + + for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) { + priv->pchans[i].base = priv->base + + SUN4I_DDMA_CHANNEL_REG_BASE(j); + priv->pchans[i].is_dedicated = 1; + } + + for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) { + struct sun4i_dma_vchan *vchan = &priv->vchans[i]; + + spin_lock_init(&vchan->vc.lock); + vchan->vc.desc_free = sun4i_dma_free_contract; + vchan_init(&vchan->vc, &priv->slave); + } + + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the clock\n"); + return ret; + } + + /* + * Make sure the IRQs are all disabled and accounted for. The bootloader + * likes to leave these dirty + */ + writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG); + writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG); + + ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt, + 0, dev_name(&pdev->dev), priv); + if (ret) { + dev_err(&pdev->dev, "Cannot request IRQ\n"); + goto err_clk_disable; + } + + ret = dma_async_device_register(&priv->slave); + if (ret) { + dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); + goto err_clk_disable; + } + + ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate, + priv); + if (ret) { + dev_err(&pdev->dev, "of_dma_controller_register failed\n"); + goto err_dma_unregister; + } + + dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n"); + + return 0; + +err_dma_unregister: + dma_async_device_unregister(&priv->slave); +err_clk_disable: + clk_disable_unprepare(priv->clk); + return ret; +} + +static int sun4i_dma_remove(struct platform_device *pdev) +{ + struct sun4i_dma_dev *priv = platform_get_drvdata(pdev); + + /* Disable IRQ so no more work is scheduled */ + disable_irq(priv->irq); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&priv->slave); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static const struct of_device_id sun4i_dma_match[] = { + { .compatible = "allwinner,sun4i-a10-dma" }, + { /* sentinel */ }, +}; + +static struct platform_driver sun4i_dma_driver = { + .probe = sun4i_dma_probe, + .remove = sun4i_dma_remove, + .driver = { + .name = "sun4i-dma", + .of_match_table = sun4i_dma_match, + }, +}; + +module_platform_driver(sun4i_dma_driver); + +MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver"); +MODULE_AUTHOR("Emilio López "); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 842ff97c2..73e0be6e2 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -969,7 +969,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_issue_pending = sun6i_dma_issue_pending; sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; - sdc->slave.copy_align = 4; + sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES; sdc->slave.device_config = sun6i_dma_config; sdc->slave.device_pause = sun6i_dma_pause; sdc->slave.device_resume = sun6i_dma_resume; diff 
--git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index eaf585e82..c8f79dcaa 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -155,7 +155,6 @@ struct tegra_dma_sg_req { int req_len; bool configured; bool last_sg; - bool half_done; struct list_head node; struct tegra_dma_desc *dma_desc; }; @@ -188,7 +187,7 @@ struct tegra_dma_channel { bool config_init; int id; int irq; - unsigned long chan_base_offset; + void __iomem *chan_addr; spinlock_t lock; bool busy; struct tegra_dma *tdma; @@ -203,8 +202,6 @@ struct tegra_dma_channel { /* ISR handler and tasklet for bottom half of isr handling */ dma_isr_handler isr_handler; struct tasklet_struct tasklet; - dma_async_tx_callback callback; - void *callback_param; /* Channel-slave specific configuration */ unsigned int slave_id; @@ -222,6 +219,13 @@ struct tegra_dma { void __iomem *base_addr; const struct tegra_dma_chip_data *chip_data; + /* + * Counter for managing global pausing of the DMA controller. + * Only applicable for devices that don't support individual + * channel pausing. + */ + u32 global_pause_count; + /* Some register need to be cache before suspend */ u32 reg_gen; @@ -242,12 +246,12 @@ static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) static inline void tdc_write(struct tegra_dma_channel *tdc, u32 reg, u32 val) { - writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); + writel(val, tdc->chan_addr + reg); } static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) { - return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); + return readl(tdc->chan_addr + reg); } static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) @@ -361,16 +365,32 @@ static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, struct tegra_dma *tdma = tdc->tdma; spin_lock(&tdma->global_lock); - tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); - if (wait_for_burst_complete) - udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); + + if (tdc->tdma->global_pause_count == 0) { + tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); + if (wait_for_burst_complete) + udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); + } + + tdc->tdma->global_pause_count++; + + spin_unlock(&tdma->global_lock); } static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) { struct tegra_dma *tdma = tdc->tdma; - tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); + spin_lock(&tdma->global_lock); + + if (WARN_ON(tdc->tdma->global_pause_count == 0)) + goto out; + + if (--tdc->tdma->global_pause_count == 0) + tdma_write(tdma, TEGRA_APBDMA_GENERAL, + TEGRA_APBDMA_GENERAL_ENABLE); + +out: spin_unlock(&tdma->global_lock); } @@ -601,7 +621,6 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, return; tdc_start_head_req(tdc); - return; } static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, @@ -628,7 +647,6 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, if (!st) dma_desc->dma_status = DMA_ERROR; } - return; } static void tegra_dma_tasklet(unsigned long data) @@ -720,7 +738,6 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) } end: spin_unlock_irqrestore(&tdc->lock, flags); - return; } static int tegra_dma_terminate_all(struct dma_chan *dc) @@ -932,7 +949,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( struct tegra_dma_sg_req *sg_req = NULL; u32 burst_size; enum dma_slave_buswidth slave_bw; - int ret; if (!tdc->config_init) { dev_err(tdc2dev(tdc), "dma channel is not configured\n"); @@ -943,9 
+959,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return NULL; } - ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, - &burst_size, &slave_bw); - if (ret < 0) + if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, + &burst_size, &slave_bw) < 0) return NULL; INIT_LIST_HEAD(&req_list); @@ -1048,7 +1063,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( dma_addr_t mem = buf_addr; u32 burst_size; enum dma_slave_buswidth slave_bw; - int ret; if (!buf_len || !period_len) { dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); @@ -1087,12 +1101,10 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( return NULL; } - ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, - &burst_size, &slave_bw); - if (ret < 0) + if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, + &burst_size, &slave_bw) < 0) return NULL; - ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; @@ -1136,7 +1148,6 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; - sg_req->half_done = false; sg_req->last_sg = false; sg_req->dma_desc = dma_desc; sg_req->req_len = len; @@ -1377,8 +1388,9 @@ static int tegra_dma_probe(struct platform_device *pdev) for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; - tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + - i * cdata->channel_reg_size; + tdc->chan_addr = tdma->base_addr + + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + + (i * cdata->channel_reg_size); res = platform_get_resource(pdev, IORESOURCE_IRQ, i); if (!res) { @@ -1418,6 +1430,7 @@ static int tegra_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); + tdma->global_pause_count = 0; tdma->dma_dev.dev = &pdev->dev; tdma->dma_dev.device_alloc_chan_resources = tegra_dma_alloc_chan_resources; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 24f5ca235..5cce8c9d0 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -20,16 +20,19 @@ #define TI_XBAR_OUTPUTS 127 #define TI_XBAR_INPUTS 256 -static DEFINE_IDR(map_idr); +#define TI_XBAR_EDMA_OFFSET 0 +#define TI_XBAR_SDMA_OFFSET 1 struct ti_dma_xbar_data { void __iomem *iomem; struct dma_router dmarouter; + struct idr map_idr; u16 safe_val; /* Value to rest the crossbar lines */ u32 xbar_requests; /* number of DMA requests connected to XBAR */ u32 dma_requests; /* number of DMA requests forwarded to DMA */ + u32 dma_offset; }; struct ti_dma_xbar_map { @@ -51,7 +54,7 @@ static void ti_dma_xbar_free(struct device *dev, void *route_data) map->xbar_in, map->xbar_out); ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); - idr_remove(&map_idr, map->xbar_out); + idr_remove(&xbar->map_idr, map->xbar_out); kfree(map); } @@ -81,12 +84,11 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, return ERR_PTR(-ENOMEM); } - map->xbar_out = idr_alloc(&map_idr, NULL, 0, xbar->dma_requests, + map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests, GFP_KERNEL); map->xbar_in = (u16)dma_spec->args[0]; - /* The DMA request is 1 based in sDMA */ - dma_spec->args[0] = map->xbar_out + 1; + dma_spec->args[0] = map->xbar_out + xbar->dma_offset; dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", 
map->xbar_in, map->xbar_out); @@ -96,9 +98,22 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, return map; } +static const struct of_device_id ti_dma_master_match[] = { + { + .compatible = "ti,omap4430-sdma", + .data = (void *)TI_XBAR_SDMA_OFFSET, + }, + { + .compatible = "ti,edma3", + .data = (void *)TI_XBAR_EDMA_OFFSET, + }, + {}, +}; + static int ti_dma_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; struct device_node *dma_node; struct ti_dma_xbar_data *xbar; struct resource *res; @@ -113,12 +128,20 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) if (!xbar) return -ENOMEM; + idr_init(&xbar->map_idr); + dma_node = of_parse_phandle(node, "dma-masters", 0); if (!dma_node) { dev_err(&pdev->dev, "Can't get DMA master node\n"); return -ENODEV; } + match = of_match_node(ti_dma_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); + return -EINVAL; + } + if (of_property_read_u32(dma_node, "dma-requests", &xbar->dma_requests)) { dev_info(&pdev->dev, @@ -139,17 +162,15 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) xbar->safe_val = (u16)safe_val; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - iomem = devm_ioremap_resource(&pdev->dev, res); - if (!iomem) - return -ENOMEM; + if (IS_ERR(iomem)) + return PTR_ERR(iomem); xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; xbar->dmarouter.route_free = ti_dma_xbar_free; + xbar->dma_offset = (u32)match->data; platform_set_drvdata(pdev, xbar); diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index c4c3d93fd..559cd4073 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -10,10 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index dff22ab01..8d57b1b12 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -21,6 +21,7 @@ * NOTE: PM support is currently not available. 
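A note on the crossbar probe fix a few hunks up: devm_ioremap_resource() reports failure through an ERR_PTR(), never NULL, so the old "if (!iomem)" test could never fire. A simplified userspace sketch of the ERR_PTR()/IS_ERR() encoding it relies on, with MAX_ERRNO as in the kernel and the rest reduced for illustration:

	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *iomem = ERR_PTR(-12);	/* -ENOMEM from a failed mapping */

		if (!iomem)
			printf("never reached: ERR_PTR(-12) is not NULL\n");
		if (IS_ERR(iomem))
			printf("mapping failed: %ld\n", PTR_ERR(iomem));
		return 0;
	}

Error codes live in the last page of the address space, which is why a pointer can carry either a valid mapping or a negative errno.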
*/ +#include #include #include #include @@ -58,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -151,7 +151,6 @@ #define XGENE_DMA_PQ_CHANNEL 1 #define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */ #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */ -#define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */ #define XGENE_DMA_MAX_XOR_SRC 5 #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 #define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -764,12 +748,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) struct xgene_dma_ring *ring = &chan->rx_ring; struct xgene_dma_desc_sw *desc_sw, *_desc_sw; struct xgene_dma_desc_hw *desc_hw; + struct list_head ld_completed; u8 status; + INIT_LIST_HEAD(&ld_completed); + + spin_lock_bh(&chan->lock); + /* Clean already completed and acked descriptors */ xgene_dma_clean_completed_descriptor(chan); - /* Run the callback for each descriptor, in order */ + /* Move all completed descriptors to ld completed queue, in order */ list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) { /* Get subsequent hw descriptor from DMA rx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -812,15 +801,18 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) /* Mark this hw descriptor as processed */ desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); - xgene_dma_run_tx_complete_actions(chan, desc_sw); - - xgene_dma_clean_running_descriptor(chan, desc_sw); - /* * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + + /* + * Delete this node from ld running queue and append it to + * ld completed queue for further processing + */ + list_move_tail(&desc_sw->node, &ld_completed); } /* @@ -829,6 +821,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * ahead and free the descriptors below. 
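The reshuffling in this cleanup hunk follows a common pattern: harvest finished descriptors onto a private ld_completed list while holding chan->lock, drop the lock, then run the callbacks, so a callback that submits new work cannot deadlock on the channel lock. A minimal sketch of the shape, with a pthread mutex in place of the spinlock and an array in place of the list:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int finished_on_hw[4] = { 1, 1, 0, 0 };	/* which entries completed */

	int main(void)
	{
		int completed[4], n = 0;

		pthread_mutex_lock(&lock);
		for (int i = 0; i < 4; i++)
			if (finished_on_hw[i]) {	/* list_move_tail(..., &ld_completed) */
				completed[n++] = i;
				finished_on_hw[i] = 0;
			}
		pthread_mutex_unlock(&lock);

		/* callbacks run lock-free, in completion order */
		for (int i = 0; i < n; i++)
			printf("callback for descriptor %d\n", completed[i]);
		return 0;
	}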
*/ xgene_chan_xfer_ld_pending(chan); + + spin_unlock_bh(&chan->lock); + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) { + xgene_dma_run_tx_complete_actions(chan, desc_sw); + xgene_dma_clean_running_descriptor(chan, desc_sw); + } } static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) @@ -877,11 +877,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan) if (!chan->desc_pool) return; - spin_lock_bh(&chan->lock); - /* Process all running descriptor */ xgene_dma_cleanup_descriptors(chan); + spin_lock_bh(&chan->lock); + /* Clean all link descriptor queues */ xgene_dma_free_desc_list(chan, &chan->ld_pending); xgene_dma_free_desc_list(chan, &chan->ld_running); @@ -1201,15 +1201,11 @@ static void xgene_dma_tasklet_cb(unsigned long data) { struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data; - spin_lock_bh(&chan->lock); - /* Run all cleanup for descriptors which have been completed */ xgene_dma_cleanup_descriptors(chan); /* Re-enable DMA channel IRQ */ enable_irq(chan->rx_irq); - - spin_unlock_bh(&chan->lock); } static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id) @@ -1410,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1471,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } @@ -1741,13 +1740,13 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; - dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT; + dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES; } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; - dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT; + dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES; } } @@ -1944,16 +1943,18 @@ static int xgene_dma_probe(struct platform_device *pdev) return ret; pdma->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pdma->clk)) { + if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) { dev_err(&pdev->dev, "Failed to get clk\n"); return PTR_ERR(pdma->clk); } /* Enable clk before accessing registers */ - ret = clk_prepare_enable(pdma->clk); - if (ret) { - dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); - return ret; + if (!IS_ERR(pdma->clk)) { + ret = clk_prepare_enable(pdma->clk); + if (ret) { + dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); + return ret; + } } /* Remove DMA RAM out of shutdown */ @@ -1998,7 +1999,8 @@ err_request_irq: err_dma_mask: err_clk_enable: - clk_disable_unprepare(pdma->clk); + if (!IS_ERR(pdma->clk)) + clk_disable_unprepare(pdma->clk); return ret; 
} @@ -2022,11 +2024,20 @@ static int xgene_dma_remove(struct platform_device *pdev) xgene_dma_delete_chan_rings(chan); } - clk_disable_unprepare(pdma->clk); + if (!IS_ERR(pdma->clk)) + clk_disable_unprepare(pdma->clk); return 0; } +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = { + {"APMC0D43", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr); +#endif + static const struct of_device_id xgene_dma_of_match_ptr[] = { {.compatible = "apm,xgene-storm-dma",}, {}, @@ -2039,6 +2050,7 @@ static struct platform_driver xgene_dma_driver = { .driver = { .name = "X-Gene-DMA", .of_match_table = xgene_dma_of_match_ptr, + .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr), }, }; diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c new file mode 100644 index 000000000..c017fcd8e --- /dev/null +++ b/drivers/dma/zx296702_dma.c @@ -0,0 +1,951 @@ +/* + * Copyright 2015 Linaro. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +#define DRIVER_NAME "zx-dma" +#define DMA_ALIGN 4 +#define DMA_MAX_SIZE (0x10000 - PAGE_SIZE) +#define LLI_BLOCK_SIZE (4 * PAGE_SIZE) + +#define REG_ZX_SRC_ADDR 0x00 +#define REG_ZX_DST_ADDR 0x04 +#define REG_ZX_TX_X_COUNT 0x08 +#define REG_ZX_TX_ZY_COUNT 0x0c +#define REG_ZX_SRC_ZY_STEP 0x10 +#define REG_ZX_DST_ZY_STEP 0x14 +#define REG_ZX_LLI_ADDR 0x1c +#define REG_ZX_CTRL 0x20 +#define REG_ZX_TC_IRQ 0x800 +#define REG_ZX_SRC_ERR_IRQ 0x804 +#define REG_ZX_DST_ERR_IRQ 0x808 +#define REG_ZX_CFG_ERR_IRQ 0x80c +#define REG_ZX_TC_IRQ_RAW 0x810 +#define REG_ZX_SRC_ERR_IRQ_RAW 0x814 +#define REG_ZX_DST_ERR_IRQ_RAW 0x818 +#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c +#define REG_ZX_STATUS 0x820 +#define REG_ZX_DMA_GRP_PRIO 0x824 +#define REG_ZX_DMA_ARB 0x828 + +#define ZX_FORCE_CLOSE BIT(31) +#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13) +#define ZX_MAX_BURST_LEN 16 +#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9) +#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6) +#define ZX_IRQ_ENABLE_ALL (3 << 4) +#define ZX_DST_FIFO_MODE BIT(3) +#define ZX_SRC_FIFO_MODE BIT(2) +#define ZX_SOFT_REQ BIT(1) +#define ZX_CH_ENABLE BIT(0) + +#define ZX_DMA_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +enum zx_dma_burst_width { + ZX_DMA_WIDTH_8BIT = 0, + ZX_DMA_WIDTH_16BIT = 1, + ZX_DMA_WIDTH_32BIT = 2, + ZX_DMA_WIDTH_64BIT = 3, +}; + +struct zx_desc_hw { + u32 saddr; + u32 daddr; + u32 src_x; + u32 src_zy; + u32 src_zy_step; + u32 dst_zy_step; + u32 reserved1; + u32 lli; + u32 ctr; + u32 reserved[7]; /* pack as hardware registers region size */ +} __aligned(32); + +struct zx_dma_desc_sw { + struct virt_dma_desc vd; + dma_addr_t desc_hw_lli; + size_t desc_num; + size_t size; + struct zx_desc_hw *desc_hw; +}; + +struct zx_dma_phy; + +struct zx_dma_chan { + struct dma_slave_config slave_cfg; + int id; /* Request phy chan id */ + u32 ccfg; + u32 cyclic; + struct virt_dma_chan vc; + struct zx_dma_phy *phy; + struct list_head node; + dma_addr_t dev_addr; + enum dma_status status; +}; + +struct zx_dma_phy { + u32 idx; + void __iomem *base; + struct zx_dma_chan *vchan; 
+ struct zx_dma_desc_sw *ds_run; + struct zx_dma_desc_sw *ds_done; +}; + +struct zx_dma_dev { + struct dma_device slave; + void __iomem *base; + spinlock_t lock; /* lock for ch and phy */ + struct list_head chan_pending; + struct zx_dma_phy *phy; + struct zx_dma_chan *chans; + struct clk *clk; + struct dma_pool *pool; + u32 dma_channels; + u32 dma_requests; + int irq; +}; + +#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave) + +static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan) +{ + return container_of(chan, struct zx_dma_chan, vc.chan); +} + +static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d) +{ + u32 val = 0; + + val = readl_relaxed(phy->base + REG_ZX_CTRL); + val &= ~ZX_CH_ENABLE; + val |= ZX_FORCE_CLOSE; + writel_relaxed(val, phy->base + REG_ZX_CTRL); + + val = 0x1 << phy->idx; + writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW); + writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW); + writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW); + writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW); +} + +static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw) +{ + writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR); + writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR); + writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT); + writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT); + writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP); + writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP); + writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR); + writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL); +} + +static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy) +{ + return readl_relaxed(phy->base + REG_ZX_LLI_ADDR); +} + +static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d) +{ + return readl_relaxed(d->base + REG_ZX_STATUS); +} + +static void zx_dma_init_state(struct zx_dma_dev *d) +{ + /* set same priority */ + writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB); + /* clear all irq */ + writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW); + writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW); + writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW); + writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW); +} + +static int zx_dma_start_txd(struct zx_dma_chan *c) +{ + struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + if (!c->phy) + return -EAGAIN; + + if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d)) + return -EAGAIN; + + if (vd) { + struct zx_dma_desc_sw *ds = + container_of(vd, struct zx_dma_desc_sw, vd); + /* + * fetch and remove request from vc->desc_issued + * so vc->desc_issued only contains desc pending + */ + list_del(&ds->vd.node); + c->phy->ds_run = ds; + c->phy->ds_done = NULL; + /* start dma */ + zx_dma_set_desc(c->phy, ds->desc_hw); + return 0; + } + c->phy->ds_done = NULL; + c->phy->ds_run = NULL; + return -EAGAIN; +} + +static void zx_dma_task(struct zx_dma_dev *d) +{ + struct zx_dma_phy *p; + struct zx_dma_chan *c, *cn; + unsigned pch, pch_alloc = 0; + unsigned long flags; + + /* check new dma request of running channel in vc->desc_issued */ + list_for_each_entry_safe(c, cn, &d->slave.channels, + vc.chan.device_node) { + spin_lock_irqsave(&c->vc.lock, flags); + p = c->phy; + if (p && p->ds_done && zx_dma_start_txd(c)) { + /* No current txd associated with this channel */ + dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); + /* Mark this channel free */ + c->phy = NULL; + p->vchan = NULL; + } + 
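+ /* + * Otherwise the channel either still has a descriptor in + * flight or new work was just started, so keep the vchan + * bound to its pchan. + */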
spin_unlock_irqrestore(&c->vc.lock, flags); + } + + /* check new channel request in d->chan_pending */ + spin_lock_irqsave(&d->lock, flags); + while (!list_empty(&d->chan_pending)) { + c = list_first_entry(&d->chan_pending, + struct zx_dma_chan, node); + p = &d->phy[c->id]; + if (!p->vchan) { + /* remove from d->chan_pending */ + list_del_init(&c->node); + pch_alloc |= 1 << c->id; + /* Mark this channel allocated */ + p->vchan = c; + c->phy = p; + } else { + dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id); + } + } + spin_unlock_irqrestore(&d->lock, flags); + + for (pch = 0; pch < d->dma_channels; pch++) { + if (pch_alloc & (1 << pch)) { + p = &d->phy[pch]; + c = p->vchan; + if (c) { + spin_lock_irqsave(&c->vc.lock, flags); + zx_dma_start_txd(c); + spin_unlock_irqrestore(&c->vc.lock, flags); + } + } + } +} + +static irqreturn_t zx_dma_int_handler(int irq, void *dev_id) +{ + struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id; + struct zx_dma_phy *p; + struct zx_dma_chan *c; + u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ); + u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ); + u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ); + u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ); + u32 i, irq_chan = 0, task = 0; + + while (tc) { + i = __ffs(tc); + tc &= ~BIT(i); + p = &d->phy[i]; + c = p->vchan; + if (c) { + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (c->cyclic) { + vchan_cyclic_callback(&p->ds_run->vd); + } else { + vchan_cookie_complete(&p->ds_run->vd); + p->ds_done = p->ds_run; + task = 1; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + irq_chan |= BIT(i); + } + } + + if (serr || derr || cfg) + dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n", + serr, derr, cfg); + + writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW); + writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW); + writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW); + writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW); + + if (task) + zx_dma_task(d); + return IRQ_HANDLED; +} + +static void zx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_dev *d = to_zx_dma(chan->device); + unsigned long flags; + + spin_lock_irqsave(&d->lock, flags); + list_del_init(&c->node); + spin_unlock_irqrestore(&d->lock, flags); + + vchan_free_chan_resources(&c->vc); + c->ccfg = 0; +} + +static enum dma_status zx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_phy *p; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + size_t bytes = 0; + + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_COMPLETE || !state) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + p = c->phy; + ret = c->status; + + /* + * If the cookie is on our issue queue, then the residue is + * its total size. 
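+ * Otherwise, walk the hardware LLI chain starting from the + * descriptor the channel is currently executing and sum the + * sizes of the segments that have not completed yet.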
+ */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size; + } else if ((!p) || (!p->ds_run)) { + bytes = 0; + } else { + struct zx_dma_desc_sw *ds = p->ds_run; + u32 clli = 0, index = 0; + + bytes = 0; + clli = zx_dma_get_curr_lli(p); + index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw); + for (; index < ds->desc_num; index++) { + bytes += ds->desc_hw[index].src_x; + /* end of lli */ + if (!ds->desc_hw[index].lli) + break; + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); + dma_set_residue(state, bytes); + return ret; +} + +static void zx_dma_issue_pending(struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_dev *d = to_zx_dma(chan->device); + unsigned long flags; + int issue = 0; + + spin_lock_irqsave(&c->vc.lock, flags); + /* add request to vc->desc_issued */ + if (vchan_issue_pending(&c->vc)) { + spin_lock(&d->lock); + if (!c->phy && list_empty(&c->node)) { + /* if new channel, add chan_pending */ + list_add_tail(&c->node, &d->chan_pending); + issue = 1; + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); + } else { + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + if (issue) + zx_dma_task(d); +} + +static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst, + dma_addr_t src, size_t len, u32 num, u32 ccfg) +{ + if ((num + 1) < ds->desc_num) + ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * + sizeof(struct zx_desc_hw); + ds->desc_hw[num].saddr = src; + ds->desc_hw[num].daddr = dst; + ds->desc_hw[num].src_x = len; + ds->desc_hw[num].ctr = ccfg; +} + +static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num, + struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_desc_sw *ds; + struct zx_dma_dev *d = to_zx_dma(chan->device); + int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw); + + if (num > lli_limit) { + dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n", + &c->vc, num, lli_limit); + return NULL; + } + + ds = kzalloc(sizeof(*ds), GFP_ATOMIC); + if (!ds) + return NULL; + + ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); + if (!ds->desc_hw) { + dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); + kfree(ds); + return NULL; + } + /* zero the whole LLI block before the fill helpers populate it */ + memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num); + ds->desc_num = num; + return ds; +} + +static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width) +{ + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + case DMA_SLAVE_BUSWIDTH_2_BYTES: + case DMA_SLAVE_BUSWIDTH_4_BYTES: + case DMA_SLAVE_BUSWIDTH_8_BYTES: + return ffs(width) - 1; + default: + return ZX_DMA_WIDTH_32BIT; + } +} + +static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir) +{ + struct dma_slave_config *cfg = &c->slave_cfg; + enum zx_dma_burst_width src_width; + enum zx_dma_burst_width dst_width; + u32 maxburst = 0; + + switch (dir) { + case DMA_MEM_TO_MEM: + c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ + | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1) + | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT) + | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT); + break; + case DMA_MEM_TO_DEV: + c->dev_addr = cfg->dst_addr; + /* dst len is calculated from src width, len and dst width. + * We need to make sure the dst len does not exceed MAX LEN. + * A trailing single transaction that does not fill a full + * burst also requires identical src/dst data width.
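+ * (for instance a tail transfer narrower than one full burst), + * which is why both burst widths below are programmed from the + * destination width.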
+ */ + dst_width = zx_dma_burst_width(cfg->dst_addr_width); + maxburst = cfg->dst_maxburst; + maxburst = maxburst < ZX_MAX_BURST_LEN ? + maxburst : ZX_MAX_BURST_LEN; + c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE + | ZX_SRC_BURST_LEN(maxburst - 1) + | ZX_SRC_BURST_WIDTH(dst_width) + | ZX_DST_BURST_WIDTH(dst_width); + break; + case DMA_DEV_TO_MEM: + c->dev_addr = cfg->src_addr; + src_width = zx_dma_burst_width(cfg->src_addr_width); + maxburst = cfg->src_maxburst; + maxburst = maxburst < ZX_MAX_BURST_LEN ? + maxburst : ZX_MAX_BURST_LEN; + c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE + | ZX_SRC_BURST_LEN(maxburst - 1) + | ZX_SRC_BURST_WIDTH(src_width) + | ZX_DST_BURST_WIDTH(src_width); + break; + default: + return -EINVAL; + } + return 0; +} + +static struct dma_async_tx_descriptor *zx_dma_prep_memcpy( + struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_desc_sw *ds; + size_t copy = 0; + int num = 0; + + if (!len) + return NULL; + + if (zx_pre_config(c, DMA_MEM_TO_MEM)) + return NULL; + + num = DIV_ROUND_UP(len, DMA_MAX_SIZE); + + ds = zx_alloc_desc_resource(num, chan); + if (!ds) + return NULL; + + ds->size = len; + num = 0; + + do { + copy = min_t(size_t, len, DMA_MAX_SIZE); + zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); + + src += copy; + dst += copy; + len -= copy; + } while (len); + + c->cyclic = 0; + ds->desc_hw[num - 1].lli = 0; /* end of link */ + ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; + return vchan_tx_prep(&c->vc, &ds->vd, flags); +} + +static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, + enum dma_transfer_direction dir, unsigned long flags, void *context) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_desc_sw *ds; + size_t len, avail, total = 0; + struct scatterlist *sg; + dma_addr_t addr, src = 0, dst = 0; + int num = sglen, i; + + if (!sgl) + return NULL; + + if (zx_pre_config(c, dir)) + return NULL; + + for_each_sg(sgl, sg, sglen, i) { + avail = sg_dma_len(sg); + if (avail > DMA_MAX_SIZE) + num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; + } + + ds = zx_alloc_desc_resource(num, chan); + if (!ds) + return NULL; + + c->cyclic = 0; + num = 0; + for_each_sg(sgl, sg, sglen, i) { + addr = sg_dma_address(sg); + avail = sg_dma_len(sg); + total += avail; + + do { + len = min_t(size_t, avail, DMA_MAX_SIZE); + + if (dir == DMA_MEM_TO_DEV) { + src = addr; + dst = c->dev_addr; + } else if (dir == DMA_DEV_TO_MEM) { + src = c->dev_addr; + dst = addr; + } + + zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); + + addr += len; + avail -= len; + } while (avail); + } + + ds->desc_hw[num - 1].lli = 0; /* end of link */ + ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL; + ds->size = total; + return vchan_tx_prep(&c->vc, &ds->vd, flags); +} + +static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_desc_sw *ds; + dma_addr_t src = 0, dst = 0; + int num_periods = buf_len / period_len; + int buf = 0, num = 0; + + if (period_len > DMA_MAX_SIZE) { + dev_err(chan->device->dev, "maximum period size exceeded\n"); + return NULL; + } + + if (zx_pre_config(c, dir)) + return NULL; + + ds = zx_alloc_desc_resource(num_periods, chan); + if (!ds) + return NULL; + c->cyclic = 1; + + while (buf < buf_len) { + 
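+ /* + * Build one hardware descriptor per period; the last + * descriptor is linked back to the first one below, so the + * transfer cycles until it is terminated. + */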
if (dir == DMA_MEM_TO_DEV) { + src = dma_addr; + dst = c->dev_addr; + } else if (dir == DMA_DEV_TO_MEM) { + src = c->dev_addr; + dst = dma_addr; + } + zx_dma_fill_desc(ds, dst, src, period_len, num++, + c->ccfg | ZX_IRQ_ENABLE_ALL); + dma_addr += period_len; + buf += period_len; + } + + ds->desc_hw[num - 1].lli = ds->desc_hw_lli; + ds->size = buf_len; + return vchan_tx_prep(&c->vc, &ds->vd, flags); +} + +static int zx_dma_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + + if (!cfg) + return -EINVAL; + + memcpy(&c->slave_cfg, cfg, sizeof(*cfg)); + + return 0; +} + +static int zx_dma_terminate_all(struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + struct zx_dma_dev *d = to_zx_dma(chan->device); + struct zx_dma_phy *p = c->phy; + unsigned long flags; + LIST_HEAD(head); + + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* Clear the tx descriptor lists */ + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); + if (p) { + /* vchan is assigned to a pchan - stop the channel */ + zx_dma_terminate_chan(p, d); + c->phy = NULL; + p->vchan = NULL; + p->ds_run = NULL; + p->ds_done = NULL; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int zx_dma_transfer_pause(struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + u32 val = 0; + + val = readl_relaxed(c->phy->base + REG_ZX_CTRL); + val &= ~ZX_CH_ENABLE; + writel_relaxed(val, c->phy->base + REG_ZX_CTRL); + + return 0; +} + +static int zx_dma_transfer_resume(struct dma_chan *chan) +{ + struct zx_dma_chan *c = to_zx_chan(chan); + u32 val = 0; + + val = readl_relaxed(c->phy->base + REG_ZX_CTRL); + val |= ZX_CH_ENABLE; + writel_relaxed(val, c->phy->base + REG_ZX_CTRL); + + return 0; +} + +static void zx_dma_free_desc(struct virt_dma_desc *vd) +{ + struct zx_dma_desc_sw *ds = + container_of(vd, struct zx_dma_desc_sw, vd); + struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device); + + dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); + kfree(ds); +} + +static const struct of_device_id zx6702_dma_dt_ids[] = { + { .compatible = "zte,zx296702-dma", }, + {} +}; +MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids); + +static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct zx_dma_dev *d = ofdma->of_dma_data; + unsigned int request = dma_spec->args[0]; + struct dma_chan *chan; + struct zx_dma_chan *c; + + if (request >= d->dma_requests) + return NULL; + + chan = dma_get_any_slave_channel(&d->slave); + if (!chan) { + dev_err(d->slave.dev, "get channel fail in %s.\n", __func__); + return NULL; + } + c = to_zx_chan(chan); + c->id = request; + dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n", + c->id, &c->vc); + return chan; +} + +static int zx_dma_probe(struct platform_device *op) +{ + struct zx_dma_dev *d; + struct resource *iores; + int i, ret = 0; + + iores = platform_get_resource(op, IORESOURCE_MEM, 0); + if (!iores) + return -EINVAL; + + d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->base = devm_ioremap_resource(&op->dev, iores); + if (IS_ERR(d->base)) + return PTR_ERR(d->base); + + of_property_read_u32((&op->dev)->of_node, + "dma-channels", &d->dma_channels); + of_property_read_u32((&op->dev)->of_node, + "dma-requests", 
&d->dma_requests); + if (!d->dma_requests || !d->dma_channels) + return -EINVAL; + + d->clk = devm_clk_get(&op->dev, NULL); + if (IS_ERR(d->clk)) { + dev_err(&op->dev, "no dma clk\n"); + return PTR_ERR(d->clk); + } + + d->irq = platform_get_irq(op, 0); + ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler, + 0, DRIVER_NAME, d); + if (ret) + return ret; + + /* A DMA memory pool for LLIs, align on 32-byte boundary */ + d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, + LLI_BLOCK_SIZE, 32, 0); + if (!d->pool) + return -ENOMEM; + + /* init phy channel */ + d->phy = devm_kzalloc(&op->dev, + d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL); + if (!d->phy) + return -ENOMEM; + + for (i = 0; i < d->dma_channels; i++) { + struct zx_dma_phy *p = &d->phy[i]; + + p->idx = i; + p->base = d->base + i * 0x40; + } + + INIT_LIST_HEAD(&d->slave.channels); + dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); + dma_cap_set(DMA_PRIVATE, d->slave.cap_mask); + d->slave.dev = &op->dev; + d->slave.device_free_chan_resources = zx_dma_free_chan_resources; + d->slave.device_tx_status = zx_dma_tx_status; + d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy; + d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic; + d->slave.device_issue_pending = zx_dma_issue_pending; + d->slave.device_config = zx_dma_config; + d->slave.device_terminate_all = zx_dma_terminate_all; + d->slave.device_pause = zx_dma_transfer_pause; + d->slave.device_resume = zx_dma_transfer_resume; + d->slave.copy_align = DMA_ALIGN; + d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS; + d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS; + d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV) + | BIT(DMA_DEV_TO_MEM); + d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + + /* init virtual channel */ + d->chans = devm_kzalloc(&op->dev, + d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL); + if (!d->chans) + return -ENOMEM; + + for (i = 0; i < d->dma_requests; i++) { + struct zx_dma_chan *c = &d->chans[i]; + + c->status = DMA_IN_PROGRESS; + INIT_LIST_HEAD(&c->node); + c->vc.desc_free = zx_dma_free_desc; + vchan_init(&c->vc, &d->slave); + } + + /* Enable clock before accessing registers */ + ret = clk_prepare_enable(d->clk); + if (ret < 0) { + dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); + goto zx_dma_out; + } + + zx_dma_init_state(d); + + spin_lock_init(&d->lock); + INIT_LIST_HEAD(&d->chan_pending); + platform_set_drvdata(op, d); + + ret = dma_async_device_register(&d->slave); + if (ret) + goto clk_dis; + + ret = of_dma_controller_register((&op->dev)->of_node, + zx_of_dma_simple_xlate, d); + if (ret) + goto of_dma_register_fail; + + dev_info(&op->dev, "initialized\n"); + return 0; + +of_dma_register_fail: + dma_async_device_unregister(&d->slave); +clk_dis: + clk_disable_unprepare(d->clk); +zx_dma_out: + return ret; +} + +static int zx_dma_remove(struct platform_device *op) +{ + struct zx_dma_chan *c, *cn; + struct zx_dma_dev *d = platform_get_drvdata(op); + + /* explicitly free the irq */ + devm_free_irq(&op->dev, d->irq, d); + + dma_async_device_unregister(&d->slave); + of_dma_controller_free((&op->dev)->of_node); + + list_for_each_entry_safe(c, cn, &d->slave.channels, + vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + } + clk_disable_unprepare(d->clk); + dmam_pool_destroy(d->pool); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int zx_dma_suspend_dev(struct device *dev) +{ + struct zx_dma_dev *d
= dev_get_drvdata(dev); + u32 stat = 0; + + stat = zx_dma_get_chan_stat(d); + if (stat) { + dev_warn(d->slave.dev, + "chan %d is running, cannot suspend\n", stat); + return -EBUSY; + } + clk_disable_unprepare(d->clk); + return 0; +} + +static int zx_dma_resume_dev(struct device *dev) +{ + struct zx_dma_dev *d = dev_get_drvdata(dev); + int ret = 0; + + ret = clk_prepare_enable(d->clk); + if (ret < 0) { + dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); + return ret; + } + zx_dma_init_state(d); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev); + +static struct platform_driver zx_pdma_driver = { + .driver = { + .name = DRIVER_NAME, + .pm = &zx_dma_pmops, + .of_match_table = zx6702_dma_dt_ids, + }, + .probe = zx_dma_probe, + .remove = zx_dma_remove, +}; + +module_platform_driver(zx_pdma_driver); + +MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver"); +MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 8677ead2a..ef25000a5 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -61,16 +61,6 @@ config EDAC_DECODE_MCE which occur really early upon boot, before the module infrastructure has been initialized. -config EDAC_MCE_INJ - tristate "Simple MCE injection interface" - depends on EDAC_DECODE_MCE && DEBUG_FS - default n - help - This is a simple debugfs interface to inject MCEs and test different - aspects of the MCE handling code. - - WARNING: Do not even assume this interface is staying stable! - config EDAC_MM_EDAC tristate "Main Memory EDAC (Error Detection And Correction) reporting" select RAS diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 28ef2a519..ae3c5f3ce 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -17,7 +17,6 @@ edac_core-y += edac_pci.o edac_pci_sysfs.o endif obj-$(CONFIG_EDAC_GHES) += ghes_edac.o -obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o edac_mce_amd-y := mce_amd.o obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 58586d59b..e3a945ce3 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -763,7 +763,8 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) c->x86, c->x86_model, c->x86_mask, m->bank, ((m->status & MCI_STATUS_OVER) ? "Over" : "-"), - ((m->status & MCI_STATUS_UC) ? "UE" : "CE"), + ((m->status & MCI_STATUS_UC) ? "UE" : + (m->status & MCI_STATUS_DEFERRED) ? "-" : "CE"), ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c deleted file mode 100644 index 4c73e4d03..000000000 --- a/drivers/edac/mce_amd_inj.c +++ /dev/null @@ -1,375 +0,0 @@ -/* - * A simple MCE injection facility for testing different aspects of the RAS - * code. This driver should be built as module so that it can be loaded - * on production kernels for testing purposes. - * - * This file may be distributed under the terms of the GNU General Public - * License version 2. - * - * Copyright (c) 2010-14: Borislav Petkov - * Advanced Micro Devices Inc.
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mce_amd.h" - -/* - * Collect all the MCi_XXX settings - */ -static struct mce i_mce; -static struct dentry *dfs_inj; - -static u8 n_banks; - -#define MAX_FLAG_OPT_SIZE 3 - -enum injection_type { - SW_INJ = 0, /* SW injection, simply decode the error */ - HW_INJ, /* Trigger a #MC */ - N_INJ_TYPES, -}; - -static const char * const flags_options[] = { - [SW_INJ] = "sw", - [HW_INJ] = "hw", - NULL -}; - -/* Set default injection to SW_INJ */ -static enum injection_type inj_type = SW_INJ; - -#define MCE_INJECT_SET(reg) \ -static int inj_##reg##_set(void *data, u64 val) \ -{ \ - struct mce *m = (struct mce *)data; \ - \ - m->reg = val; \ - return 0; \ -} - -MCE_INJECT_SET(status); -MCE_INJECT_SET(misc); -MCE_INJECT_SET(addr); - -#define MCE_INJECT_GET(reg) \ -static int inj_##reg##_get(void *data, u64 *val) \ -{ \ - struct mce *m = (struct mce *)data; \ - \ - *val = m->reg; \ - return 0; \ -} - -MCE_INJECT_GET(status); -MCE_INJECT_GET(misc); -MCE_INJECT_GET(addr); - -DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n"); - -/* - * Caller needs to be make sure this cpu doesn't disappear - * from under us, i.e.: get_cpu/put_cpu. - */ -static int toggle_hw_mce_inject(unsigned int cpu, bool enable) -{ - u32 l, h; - int err; - - err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); - if (err) { - pr_err("%s: error reading HWCR\n", __func__); - return err; - } - - enable ? (l |= BIT(18)) : (l &= ~BIT(18)); - - err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); - if (err) - pr_err("%s: error writing HWCR\n", __func__); - - return err; -} - -static int __set_inj(const char *buf) -{ - int i; - - for (i = 0; i < N_INJ_TYPES; i++) { - if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) { - inj_type = i; - return 0; - } - } - return -EINVAL; -} - -static ssize_t flags_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[MAX_FLAG_OPT_SIZE]; - int n; - - n = sprintf(buf, "%s\n", flags_options[inj_type]); - - return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); -} - -static ssize_t flags_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[MAX_FLAG_OPT_SIZE], *__buf; - int err; - size_t ret; - - if (cnt > MAX_FLAG_OPT_SIZE) - cnt = MAX_FLAG_OPT_SIZE; - - ret = cnt; - - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - - buf[cnt - 1] = 0; - - /* strip whitespace */ - __buf = strstrip(buf); - - err = __set_inj(__buf); - if (err) { - pr_err("%s: Invalid flags value: %s\n", __func__, __buf); - return err; - } - - *ppos += ret; - - return ret; -} - -static const struct file_operations flags_fops = { - .read = flags_read, - .write = flags_write, - .llseek = generic_file_llseek, -}; - -/* - * On which CPU to inject? 
- */ -MCE_INJECT_GET(extcpu); - -static int inj_extcpu_set(void *data, u64 val) -{ - struct mce *m = (struct mce *)data; - - if (val >= nr_cpu_ids || !cpu_online(val)) { - pr_err("%s: Invalid CPU: %llu\n", __func__, val); - return -EINVAL; - } - m->extcpu = val; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n"); - -static void trigger_mce(void *info) -{ - asm volatile("int $18"); -} - -static void do_inject(void) -{ - u64 mcg_status = 0; - unsigned int cpu = i_mce.extcpu; - u8 b = i_mce.bank; - - if (i_mce.misc) - i_mce.status |= MCI_STATUS_MISCV; - - if (inj_type == SW_INJ) { - amd_decode_mce(NULL, 0, &i_mce); - return; - } - - /* prep MCE global settings for the injection */ - mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV; - - if (!(i_mce.status & MCI_STATUS_PCC)) - mcg_status |= MCG_STATUS_RIPV; - - get_online_cpus(); - if (!cpu_online(cpu)) - goto err; - - toggle_hw_mce_inject(cpu, true); - - wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS, - (u32)mcg_status, (u32)(mcg_status >> 32)); - - wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b), - (u32)i_mce.status, (u32)(i_mce.status >> 32)); - - wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b), - (u32)i_mce.addr, (u32)(i_mce.addr >> 32)); - - wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b), - (u32)i_mce.misc, (u32)(i_mce.misc >> 32)); - - toggle_hw_mce_inject(cpu, false); - - smp_call_function_single(cpu, trigger_mce, NULL, 0); - -err: - put_online_cpus(); - -} - -/* - * This denotes into which bank we're injecting and triggers - * the injection, at the same time. - */ -static int inj_bank_set(void *data, u64 val) -{ - struct mce *m = (struct mce *)data; - - if (val >= n_banks) { - pr_err("Non-existent MCE bank: %llu\n", val); - return -EINVAL; - } - - m->bank = val; - do_inject(); - - return 0; -} - -MCE_INJECT_GET(bank); - -DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); - -static const char readme_msg[] = -"Description of the files and their usages:\n" -"\n" -"Note1: i refers to the bank number below.\n" -"Note2: See respective BKDGs for the exact bit definitions of the files below\n" -"as they mirror the hardware registers.\n" -"\n" -"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n" -"\t attributes of the error which caused the MCE.\n" -"\n" -"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n" -"\t used for error thresholding purposes and its validity is indicated by\n" -"\t MCi_STATUS[MiscV].\n" -"\n" -"addr:\t Error address value to be written to MCi_ADDR. Log address information\n" -"\t associated with the error.\n" -"\n" -"cpu:\t The CPU to inject the error on.\n" -"\n" -"bank:\t Specify the bank you want to inject the error into: the number of\n" -"\t banks in a processor varies and is family/model-specific, therefore, the\n" -"\t supplied value is sanity-checked. Setting the bank value also triggers the\n" -"\t injection.\n" -"\n" -"flags:\t Injection type to be performed. Writing to this file will trigger a\n" -"\t real machine check, an APIC interrupt or invoke the error decoder routines\n" -"\t for AMD processors.\n" -"\n" -"\t Allowed error injection types:\n" -"\t - \"sw\": Software error injection. Decode error to a human-readable \n" -"\t format only. Safe to use.\n" -"\t - \"hw\": Hardware error injection. Causes the #MC exception handler to \n" -"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" -"\t is set. 
Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" -"\t before injecting.\n" -"\n"; - -static ssize_t -inj_readme_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - return simple_read_from_buffer(ubuf, cnt, ppos, - readme_msg, strlen(readme_msg)); -} - -static const struct file_operations readme_fops = { - .read = inj_readme_read, -}; - -static struct dfs_node { - char *name; - struct dentry *d; - const struct file_operations *fops; - umode_t perm; -} dfs_fls[] = { - { .name = "status", .fops = &status_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR }, - { .name = "README", .fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH }, -}; - -static int __init init_mce_inject(void) -{ - int i; - u64 cap; - - rdmsrl(MSR_IA32_MCG_CAP, cap); - n_banks = cap & MCG_BANKCNT_MASK; - - dfs_inj = debugfs_create_dir("mce-inject", NULL); - if (!dfs_inj) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) { - dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name, - dfs_fls[i].perm, - dfs_inj, - &i_mce, - dfs_fls[i].fops); - - if (!dfs_fls[i].d) - goto err_dfs_add; - } - - return 0; - -err_dfs_add: - while (--i >= 0) - debugfs_remove(dfs_fls[i].d); - - debugfs_remove(dfs_inj); - dfs_inj = NULL; - - return -ENOMEM; -} - -static void __exit exit_mce_inject(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) - debugfs_remove(dfs_fls[i].d); - - memset(&dfs_fls, 0, sizeof(dfs_fls)); - - debugfs_remove(dfs_inj); - dfs_inj = NULL; -} -module_init(init_mce_inject); -module_exit(exit_mce_inject); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Borislav Petkov "); -MODULE_AUTHOR("AMD Inc."); -MODULE_DESCRIPTION("MCE injection facility for RAS testing"); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index ca7831168..cf1268dde 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -280,6 +280,7 @@ struct sbridge_info { u8 max_interleave; u8 (*get_node_id)(struct sbridge_pvt *pvt); enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); + enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr); struct pci_dev *pci_vtd; }; @@ -471,6 +472,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = { #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb static const struct pci_id_descr pci_dev_descr_haswell[] = { /* first item must be the HA */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, @@ -488,6 +492,9 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, @@ -762,6 +769,49 @@ out: return 
mtype; } +static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr) +{ + /* there's no way to figure out the device width on Sandy Bridge */ + return DEV_UNKNOWN; +} + +static enum dev_type __ibridge_get_width(u32 mtr) +{ + enum dev_type type; + + switch (mtr) { + case 3: + type = DEV_UNKNOWN; + break; + case 2: + type = DEV_X16; + break; + case 1: + type = DEV_X8; + break; + case 0: + type = DEV_X4; + break; + } + + return type; +} + +static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr) +{ + /* + * Called ddr3_width in the documentation, but also valid for + * DDR4 on Haswell. + */ + return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8)); +} + +static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr) +{ + /* Called ddr3_width in the documentation, but also valid for DDR4. */ + return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9)); +} + static u8 get_node_id(struct sbridge_pvt *pvt) { u32 reg; @@ -966,17 +1016,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) dimm->nr_pages = npages; dimm->grain = 32; - switch (banks) { - case 16: - dimm->dtype = DEV_X16; - break; - case 8: - dimm->dtype = DEV_X8; - break; - case 4: - dimm->dtype = DEV_X4; - break; - } + dimm->dtype = pvt->info.get_width(pvt, mtr); dimm->mtype = mtype; dimm->edac_mode = mode; snprintf(dimm->label, sizeof(dimm->label), @@ -1869,7 +1909,11 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci, } break; case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: - pvt->pci_ddrio = pdev; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1: + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2: + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3: + if (!pvt->pci_ddrio) + pvt->pci_ddrio = pdev; break; case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: pvt->pci_ha1 = pdev; @@ -2361,6 +2405,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); pvt->info.interleave_pkg = ibridge_interleave_pkg; + pvt->info.get_width = ibridge_get_width; mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx); /* Store pci devices at mci for faster access */ @@ -2380,6 +2425,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.interleave_list = sbridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); pvt->info.interleave_pkg = sbridge_interleave_pkg; + pvt->info.get_width = sbridge_get_width; mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx); /* Store pci devices at mci for faster access */ @@ -2399,6 +2445,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); pvt->info.interleave_pkg = ibridge_interleave_pkg; + pvt->info.get_width = ibridge_get_width; mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); /* Store pci devices at mci for faster access */ @@ -2418,6 +2465,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); pvt->info.interleave_pkg = ibridge_interleave_pkg; + pvt->info.get_width = broadwell_get_width; mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx); /* Store pci devices at mci for faster access */ diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 14636e4b6..ba06904af 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1168,7 +1168,6 @@ static struct platform_driver xgene_edac_driver = { .remove = xgene_edac_remove, .driver = { .name = "xgene-edac", - .owner = THIS_MODULE, .of_match_table = xgene_edac_of_match, }, }; diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index ad87f2630..4b9f09cc3 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -20,10 +20,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include @@ -46,6 +48,9 @@ #define HPDET_DEBOUNCE 500 #define DEFAULT_MICD_TIMEOUT 2000 +#define MICD_DBTIME_TWO_READINGS 2 +#define MICD_DBTIME_FOUR_READINGS 4 + #define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \ ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \ ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \ @@ -94,6 +99,8 @@ struct arizona_extcon_info { int hpdet_ip_version; struct extcon_dev *edev; + + struct gpio_desc *micd_pol_gpio; }; static const struct arizona_micd_config micd_default_modes[] = { @@ -204,6 +211,10 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode) if (arizona->pdata.micd_pol_gpio > 0) gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio, info->micd_modes[mode].gpio); + else + gpiod_set_value_cansleep(info->micd_pol_gpio, + info->micd_modes[mode].gpio); + regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, ARIZONA_MICD_BIAS_SRC_MASK, info->micd_modes[mode].bias << @@ -757,10 +768,11 @@ static void arizona_micd_timeout_work(struct work_struct *work) mutex_lock(&info->lock); dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n"); - arizona_identify_headphone(info); info->detecting = false; + arizona_identify_headphone(info); + arizona_stop_mic(info); mutex_unlock(&info->lock); @@ -820,12 +832,18 @@ static void arizona_micd_detect(struct work_struct *work) /* Due to jack detect this should never happen */ if (!(val & ARIZONA_MICD_STS)) { dev_warn(arizona->dev, "Detected open circuit\n"); + info->mic = false; + arizona_stop_mic(info); info->detecting = false; + arizona_identify_headphone(info); goto handled; } /* If we got a high impedence we should have a headset, report it. 
*/ if (info->detecting && (val & ARIZONA_MICD_LVL_8)) { + info->mic = true; + info->detecting = false; + arizona_identify_headphone(info); ret = extcon_set_cable_state_(info->edev, @@ -841,8 +859,6 @@ static void arizona_micd_detect(struct work_struct *work) ret); } - info->mic = true; - info->detecting = false; goto handled; } @@ -855,10 +871,11 @@ static void arizona_micd_detect(struct work_struct *work) if (info->detecting && (val & MICD_LVL_1_TO_7)) { if (info->jack_flips >= info->micd_num_modes * 10) { dev_dbg(arizona->dev, "Detected HP/line\n"); - arizona_identify_headphone(info); info->detecting = false; + arizona_identify_headphone(info); + arizona_stop_mic(info); } else { info->micd_mode++; @@ -1110,12 +1127,12 @@ static void arizona_micd_set_level(struct arizona *arizona, int index, regmap_update_bits(arizona->regmap, reg, mask, level); } -static int arizona_extcon_of_get_pdata(struct arizona *arizona) +static int arizona_extcon_device_get_pdata(struct arizona *arizona) { struct arizona_pdata *pdata = &arizona->pdata; unsigned int val = ARIZONA_ACCDET_MODE_HPL; - of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val); + device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val); switch (val) { case ARIZONA_ACCDET_MODE_HPL: case ARIZONA_ACCDET_MODE_HPR: @@ -1127,6 +1144,24 @@ static int arizona_extcon_of_get_pdata(struct arizona *arizona) pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL; } + device_property_read_u32(arizona->dev, "wlf,micd-detect-debounce", + &pdata->micd_detect_debounce); + + device_property_read_u32(arizona->dev, "wlf,micd-bias-start-time", + &pdata->micd_bias_start_time); + + device_property_read_u32(arizona->dev, "wlf,micd-rate", + &pdata->micd_rate); + + device_property_read_u32(arizona->dev, "wlf,micd-dbtime", + &pdata->micd_dbtime); + + device_property_read_u32(arizona->dev, "wlf,micd-timeout", + &pdata->micd_timeout); + + pdata->micd_force_micbias = device_property_read_bool(arizona->dev, + "wlf,micd-force-micbias"); + return 0; } @@ -1147,10 +1182,8 @@ static int arizona_extcon_probe(struct platform_device *pdev) if (!info) return -ENOMEM; - if (IS_ENABLED(CONFIG_OF)) { - if (!dev_get_platdata(arizona->dev)) - arizona_extcon_of_get_pdata(arizona); - } + if (!dev_get_platdata(arizona->dev)) + arizona_extcon_device_get_pdata(arizona); info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD"); if (IS_ERR(info->micvdd)) { @@ -1241,6 +1274,27 @@ static int arizona_extcon_probe(struct platform_device *pdev) arizona->pdata.micd_pol_gpio, ret); goto err_register; } + } else { + if (info->micd_modes[0].gpio) + mode = GPIOD_OUT_HIGH; + else + mode = GPIOD_OUT_LOW; + + /* We can't use devm here because we need to do the get + * against the MFD device, as that is where the of_node + * will reside, but if we devm against that the GPIO + * will not be freed if the extcon driver is unloaded. 
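+ * Instead the GPIO is released explicitly via gpiod_put() in + * the error path and in arizona_extcon_remove().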
+ */ + info->micd_pol_gpio = gpiod_get_optional(arizona->dev, + "wlf,micd-pol", + mode); + if (IS_ERR(info->micd_pol_gpio)) { + ret = PTR_ERR(info->micd_pol_gpio); + dev_err(arizona->dev, + "Failed to get microphone polarity GPIO: %d\n", + ret); + goto err_register; + } } if (arizona->pdata.hpdet_id_gpio > 0) { @@ -1251,7 +1305,7 @@ static int arizona_extcon_probe(struct platform_device *pdev) if (ret != 0) { dev_err(arizona->dev, "Failed to request GPIO%d: %d\n", arizona->pdata.hpdet_id_gpio, ret); - goto err_register; + goto err_gpio; } } @@ -1267,11 +1321,19 @@ static int arizona_extcon_probe(struct platform_device *pdev) arizona->pdata.micd_rate << ARIZONA_MICD_RATE_SHIFT); - if (arizona->pdata.micd_dbtime) + switch (arizona->pdata.micd_dbtime) { + case MICD_DBTIME_FOUR_READINGS: regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, ARIZONA_MICD_DBTIME_MASK, - arizona->pdata.micd_dbtime - << ARIZONA_MICD_DBTIME_SHIFT); + ARIZONA_MICD_DBTIME); + break; + case MICD_DBTIME_TWO_READINGS: + regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, + ARIZONA_MICD_DBTIME_MASK, 0); + break; + default: + break; + } BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40); @@ -1295,7 +1357,7 @@ static int arizona_extcon_probe(struct platform_device *pdev) dev_err(arizona->dev, "MICD ranges must be sorted\n"); ret = -EINVAL; - goto err_input; + goto err_gpio; } } } @@ -1314,7 +1376,7 @@ static int arizona_extcon_probe(struct platform_device *pdev) dev_err(arizona->dev, "Unsupported MICD level %d\n", info->micd_ranges[i].max); ret = -EINVAL; - goto err_input; + goto err_gpio; } dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n", @@ -1387,7 +1449,7 @@ static int arizona_extcon_probe(struct platform_device *pdev) if (ret != 0) { dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n", ret); - goto err_input; + goto err_gpio; } ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1); @@ -1458,7 +1520,8 @@ err_rise_wake: arizona_set_irq_wake(arizona, jack_irq_rise, 0); err_rise: arizona_free_irq(arizona, jack_irq_rise, info); -err_input: +err_gpio: + gpiod_put(info->micd_pol_gpio); err_register: pm_runtime_disable(&pdev->dev); return ret; @@ -1470,6 +1533,8 @@ static int arizona_extcon_remove(struct platform_device *pdev) struct arizona *arizona = info->arizona; int jack_irq_rise, jack_irq_fall; + gpiod_put(info->micd_pol_gpio); + pm_runtime_disable(&pdev->dev); regmap_update_bits(arizona->regmap, diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c index 355459a54..57c24fa52 100644 --- a/drivers/extcon/extcon-gpio.c +++ b/drivers/extcon/extcon-gpio.c @@ -65,22 +65,6 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf) -{ - struct device *dev = edev->dev.parent; - struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev); - const char *state; - - if (extcon_get_state(edev)) - state = extcon_data->state_on; - else - state = extcon_data->state_off; - - if (state) - return sprintf(buf, "%s\n", state); - return -EINVAL; -} - static int gpio_extcon_probe(struct platform_device *pdev) { struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -110,8 +94,6 @@ static int gpio_extcon_probe(struct platform_device *pdev) extcon_data->state_on = pdata->state_on; extcon_data->state_off = pdata->state_off; extcon_data->check_on_resume = pdata->check_on_resume; - if (pdata->state_on && pdata->state_off) - extcon_data->edev->print_state =
extcon_gpio_print_state; ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, pdev->name); diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index f4f3b3d53..35b9e118b 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,7 @@ static struct max77693_reg_data default_init_data[] = { { /* STATUS2 - [3]ChgDetRun */ .addr = MAX77693_MUIC_REG_STATUS2, - .data = STATUS2_CHGDETRUN_MASK, + .data = MAX77693_STATUS2_CHGDETRUN_MASK, }, { /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */ .addr = MAX77693_MUIC_REG_INTMASK1, @@ -235,7 +236,7 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info, */ ret = regmap_write(info->max77693->regmap_muic, MAX77693_MUIC_REG_CTRL3, - time << CONTROL3_ADCDBSET_SHIFT); + time << MAX77693_CONTROL3_ADCDBSET_SHIFT); if (ret) { dev_err(info->dev, "failed to set ADC debounce time\n"); return ret; @@ -268,7 +269,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info, if (attached) ctrl1 = val; else - ctrl1 = CONTROL1_SW_OPEN; + ctrl1 = MAX77693_CONTROL1_SW_OPEN; ret = regmap_update_bits(info->max77693->regmap_muic, MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1); @@ -278,13 +279,14 @@ static int max77693_muic_set_path(struct max77693_muic_info *info, } if (attached) - ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */ + ctrl2 |= MAX77693_CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */ else - ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */ + ctrl2 |= MAX77693_CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */ ret = regmap_update_bits(info->max77693->regmap_muic, MAX77693_MUIC_REG_CTRL2, - CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2); + MAX77693_CONTROL2_LOWPWR_MASK | MAX77693_CONTROL2_CPEN_MASK, + ctrl2); if (ret < 0) { dev_err(info->dev, "failed to update MUIC register\n"); return ret; @@ -326,8 +328,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info, * Read ADC value to check cable type and decide cable state * according to cable type */ - adc = info->status[0] & STATUS1_ADC_MASK; - adc >>= STATUS1_ADC_SHIFT; + adc = info->status[0] & MAX77693_STATUS1_ADC_MASK; + adc >>= MAX77693_STATUS1_ADC_SHIFT; /* * Check current cable state/cable type and store cable type @@ -350,8 +352,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info, * Read ADC value to check cable type and decide cable state * according to cable type */ - adc = info->status[0] & STATUS1_ADC_MASK; - adc >>= STATUS1_ADC_SHIFT; + adc = info->status[0] & MAX77693_STATUS1_ADC_MASK; + adc >>= MAX77693_STATUS1_ADC_SHIFT; /* * Check current cable state/cable type and store cable type @@ -366,13 +368,13 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info, } else { *attached = true; - adclow = info->status[0] & STATUS1_ADCLOW_MASK; - adclow >>= STATUS1_ADCLOW_SHIFT; - adc1k = info->status[0] & STATUS1_ADC1K_MASK; - adc1k >>= STATUS1_ADC1K_SHIFT; + adclow = info->status[0] & MAX77693_STATUS1_ADCLOW_MASK; + adclow >>= MAX77693_STATUS1_ADCLOW_SHIFT; + adc1k = info->status[0] & MAX77693_STATUS1_ADC1K_MASK; + adc1k >>= MAX77693_STATUS1_ADC1K_SHIFT; - vbvolt = info->status[1] & STATUS2_VBVOLT_MASK; - vbvolt >>= STATUS2_VBVOLT_SHIFT; + vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK; + vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT; /** * [0x1|VBVolt|ADCLow|ADC1K] @@ -397,8 +399,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info 
*info, * Read charger type to check cable type and decide cable state * according to type of charger cable. */ - chg_type = info->status[1] & STATUS2_CHGTYP_MASK; - chg_type >>= STATUS2_CHGTYP_SHIFT; + chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK; + chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT; if (chg_type == MAX77693_CHARGER_TYPE_NONE) { *attached = false; @@ -422,10 +424,10 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info, * Read ADC value to check cable type and decide cable state * according to cable type */ - adc = info->status[0] & STATUS1_ADC_MASK; - adc >>= STATUS1_ADC_SHIFT; - chg_type = info->status[1] & STATUS2_CHGTYP_MASK; - chg_type >>= STATUS2_CHGTYP_SHIFT; + adc = info->status[0] & MAX77693_STATUS1_ADC_MASK; + adc >>= MAX77693_STATUS1_ADC_SHIFT; + chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK; + chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT; if (adc == MAX77693_MUIC_ADC_OPEN && chg_type == MAX77693_CHARGER_TYPE_NONE) @@ -437,8 +439,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info, * Read vbvolt field, if vbvolt is 1, * this cable is used for charging. */ - vbvolt = info->status[1] & STATUS2_VBVOLT_MASK; - vbvolt >>= STATUS2_VBVOLT_SHIFT; + vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK; + vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT; cable_type = vbvolt; break; @@ -520,7 +522,8 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info, } /* Dock-Car/Desk/Audio, PATH:AUDIO */ - ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); + ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO, + attached); if (ret < 0) return ret; extcon_set_cable_state_(info->edev, dock_id, attached); @@ -585,14 +588,16 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info) case MAX77693_MUIC_GND_USB_HOST: case MAX77693_MUIC_GND_USB_HOST_VB: /* USB_HOST, PATH: AP_USB */ - ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); + ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_USB, + attached); if (ret < 0) return ret; extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached); break; case MAX77693_MUIC_GND_AV_CABLE_LOAD: /* Audio Video Cable with load, PATH:AUDIO */ - ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); + ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO, + attached); if (ret < 0) return ret; extcon_set_cable_state_(info->edev, EXTCON_USB, attached); @@ -615,7 +620,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info, int cable_type, bool attached) { int ret = 0; - u8 path = CONTROL1_SW_OPEN; + u8 path = MAX77693_CONTROL1_SW_OPEN; dev_info(info->dev, "external connector is %s (adc:0x%02x)\n", @@ -625,12 +630,12 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info, case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */ /* PATH:AP_USB */ - path = CONTROL1_SW_USB; + path = MAX77693_CONTROL1_SW_USB; break; case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */ /* PATH:AP_UART */ - path = CONTROL1_SW_UART; + path = MAX77693_CONTROL1_SW_UART; break; default: dev_err(info->dev, "failed to detect %s jig cable\n", @@ -1077,7 +1082,7 @@ static int max77693_muic_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "allocate register map\n"); } else { info->max77693->regmap_muic = devm_regmap_init_i2c( - 
info->max77693->muic, + info->max77693->i2c_muic, &max77693_muic_regmap_config); if (IS_ERR(info->max77693->regmap_muic)) { ret = PTR_ERR(info->max77693->regmap_muic); @@ -1164,28 +1169,9 @@ static int max77693_muic_probe(struct platform_device *pdev) } for (i = 0; i < num_init_data; i++) { - enum max77693_irq_source irq_src - = MAX77693_IRQ_GROUP_NR; - regmap_write(info->max77693->regmap_muic, init_data[i].addr, init_data[i].data); - - switch (init_data[i].addr) { - case MAX77693_MUIC_REG_INTMASK1: - irq_src = MUIC_INT1; - break; - case MAX77693_MUIC_REG_INTMASK2: - irq_src = MUIC_INT2; - break; - case MAX77693_MUIC_REG_INTMASK3: - irq_src = MUIC_INT3; - break; - } - - if (irq_src < MAX77693_IRQ_GROUP_NR) - info->max77693->irq_masks_cur[irq_src] - = init_data[i].data; } if (pdata && pdata->muic_data) { @@ -1199,12 +1185,12 @@ static int max77693_muic_probe(struct platform_device *pdev) if (muic_pdata->path_uart) info->path_uart = muic_pdata->path_uart; else - info->path_uart = CONTROL1_SW_UART; + info->path_uart = MAX77693_CONTROL1_SW_UART; if (muic_pdata->path_usb) info->path_usb = muic_pdata->path_usb; else - info->path_usb = CONTROL1_SW_USB; + info->path_usb = MAX77693_CONTROL1_SW_USB; /* * Default delay time for detecting cable state @@ -1216,8 +1202,8 @@ static int max77693_muic_probe(struct platform_device *pdev) else delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); } else { - info->path_usb = CONTROL1_SW_USB; - info->path_uart = CONTROL1_SW_UART; + info->path_usb = MAX77693_CONTROL1_SW_USB; + info->path_uart = MAX77693_CONTROL1_SW_UART; delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); } diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c index fac2f1417..fdd928542 100644 --- a/drivers/extcon/extcon-max77843.c +++ b/drivers/extcon/extcon-max77843.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -32,7 +33,7 @@ enum max77843_muic_status { struct max77843_muic_info { struct device *dev; - struct max77843 *max77843; + struct max77693_dev *max77843; struct extcon_dev *edev; struct mutex mutex; @@ -198,18 +199,18 @@ static const struct regmap_irq_chip max77843_muic_irq_chip = { static int max77843_muic_set_path(struct max77843_muic_info *info, u8 val, bool attached) { - struct max77843 *max77843 = info->max77843; + struct max77693_dev *max77843 = info->max77843; int ret = 0; unsigned int ctrl1, ctrl2; if (attached) ctrl1 = val; else - ctrl1 = CONTROL1_SW_OPEN; + ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN; ret = regmap_update_bits(max77843->regmap_muic, MAX77843_MUIC_REG_CONTROL1, - CONTROL1_COM_SW, ctrl1); + MAX77843_MUIC_CONTROL1_COM_SW, ctrl1); if (ret < 0) { dev_err(info->dev, "Cannot switch MUIC port\n"); return ret; @@ -243,7 +244,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info, adc = info->status[MAX77843_MUIC_STATUS1] & MAX77843_MUIC_STATUS1_ADC_MASK; - adc >>= STATUS1_ADC_SHIFT; + adc >>= MAX77843_MUIC_STATUS1_ADC_SHIFT; switch (group) { case MAX77843_CABLE_GROUP_ADC: @@ -309,7 +310,7 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info, /* Get VBVolt register bit */ gnd_type |= (info->status[MAX77843_MUIC_STATUS2] & MAX77843_MUIC_STATUS2_VBVOLT_MASK); - gnd_type >>= STATUS2_VBVOLT_SHIFT; + gnd_type >>= MAX77843_MUIC_STATUS2_VBVOLT_SHIFT; /* Offset of GND cable */ gnd_type |= MAX77843_MUIC_GND_USB_HOST; @@ -338,7 +339,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info) switch (gnd_cable_type) { case MAX77843_MUIC_GND_USB_HOST: case 
MAX77843_MUIC_GND_USB_HOST_VB: - ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_USB, + attached); if (ret < 0) return ret; @@ -346,7 +349,9 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info) break; case MAX77843_MUIC_GND_MHL_VB: case MAX77843_MUIC_GND_MHL: - ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); if (ret < 0) return ret; @@ -365,7 +370,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info, int cable_type, bool attached) { int ret; - u8 path = CONTROL1_SW_OPEN; + u8 path = MAX77843_MUIC_CONTROL1_SW_OPEN; dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n", attached ? "attached" : "detached", cable_type); @@ -373,10 +378,10 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info, switch (cable_type) { case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF: case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON: - path = CONTROL1_SW_USB; + path = MAX77843_MUIC_CONTROL1_SW_USB; break; case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF: - path = CONTROL1_SW_UART; + path = MAX77843_MUIC_CONTROL1_SW_UART; break; default: return -EINVAL; @@ -474,14 +479,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info) switch (chg_type) { case MAX77843_MUIC_CHG_USB: - ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_USB, + attached); if (ret < 0) return ret; extcon_set_cable_state_(info->edev, EXTCON_USB, attached); break; case MAX77843_MUIC_CHG_DOWNSTREAM: - ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); if (ret < 0) return ret; @@ -489,14 +498,18 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info) attached); break; case MAX77843_MUIC_CHG_DEDICATED: - ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); if (ret < 0) return ret; extcon_set_cable_state_(info->edev, EXTCON_TA, attached); break; case MAX77843_MUIC_CHG_SPECIAL_500MA: - ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); if (ret < 0) return ret; @@ -504,7 +517,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info) attached); break; case MAX77843_MUIC_CHG_SPECIAL_1A: - ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + ret = max77843_muic_set_path(info, + MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); if (ret < 0) return ret; @@ -528,7 +543,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info) "failed to detect %s accessory (chg_type:0x%x)\n", attached ?
"attached" : "detached", chg_type); - max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); + max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN, + attached); return -EINVAL; } @@ -539,7 +555,7 @@ static void max77843_muic_irq_work(struct work_struct *work) { struct max77843_muic_info *info = container_of(work, struct max77843_muic_info, irq_work); - struct max77843 *max77843 = info->max77843; + struct max77693_dev *max77843 = info->max77843; int ret = 0; mutex_lock(&info->mutex); @@ -615,7 +631,7 @@ static void max77843_muic_detect_cable_wq(struct work_struct *work) { struct max77843_muic_info *info = container_of(to_delayed_work(work), struct max77843_muic_info, wq_detcable); - struct max77843 *max77843 = info->max77843; + struct max77693_dev *max77843 = info->max77843; int chg_type, adc, ret; bool attached; @@ -656,7 +672,7 @@ err_cable_wq: static int max77843_muic_set_debounce_time(struct max77843_muic_info *info, enum max77843_muic_adc_debounce_time time) { - struct max77843 *max77843 = info->max77843; + struct max77693_dev *max77843 = info->max77843; int ret; switch (time) { @@ -667,7 +683,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info, ret = regmap_update_bits(max77843->regmap_muic, MAX77843_MUIC_REG_CONTROL4, MAX77843_MUIC_CONTROL4_ADCDBSET_MASK, - time << CONTROL4_ADCDBSET_SHIFT); + time << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT); if (ret < 0) { dev_err(info->dev, "Cannot write MUIC regmap\n"); return ret; @@ -681,7 +697,7 @@ static int max77843_muic_set_debounce_time(struct max77843_muic_info *info, return 0; } -static int max77843_init_muic_regmap(struct max77843 *max77843) +static int max77843_init_muic_regmap(struct max77693_dev *max77843) { int ret; @@ -720,7 +736,7 @@ err_muic_i2c: static int max77843_muic_probe(struct platform_device *pdev) { - struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent); + struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent); struct max77843_muic_info *info; unsigned int id; int i, ret; @@ -768,7 +784,7 @@ static int max77843_muic_probe(struct platform_device *pdev) max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS); /* Set initial path for UART */ - max77843_muic_set_path(info, CONTROL1_SW_UART, true); + max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true); /* Check revision number of MUIC device */ ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id); @@ -781,6 +797,15 @@ /* Support virtual irq domain for max77843 MUIC device */ INIT_WORK(&info->irq_work, max77843_muic_irq_work); + /* Clear pending IRQ bits before requesting IRQs */ + ret = regmap_bulk_read(max77843->regmap_muic, + MAX77843_MUIC_REG_INT1, info->status, + MAX77843_MUIC_IRQ_NUM); + if (ret) { + dev_err(&pdev->dev, "Failed to clear IRQ bits\n"); + goto err_muic_irq; + } + for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) { struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i]; unsigned int virq = 0; @@ -821,7 +846,7 @@ err_muic_irq: static int max77843_muic_remove(struct platform_device *pdev) { struct max77843_muic_info *info = platform_get_drvdata(pdev); - struct max77843 *max77843 = info->max77843; + struct max77693_dev *max77843 = info->max77843; cancel_work_sync(&info->irq_work); regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic); diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index eebdf2a33..93c30a885 100644 --- a/drivers/extcon/extcon-palmas.c +++
b/drivers/extcon/extcon-palmas.c @@ -28,6 +28,11 @@ #include #include #include +#include +#include +#include + +#define USB_GPIO_DEBOUNCE_MS 20 /* ms */ static const unsigned int palmas_extcon_cable[] = { EXTCON_USB, @@ -35,8 +40,6 @@ static const unsigned int palmas_extcon_cable[] = { EXTCON_NONE, }; -static const int mutually_exclusive[] = {0x3, 0x0}; - static void palmas_usb_wakeup(struct palmas *palmas, int enable) { if (enable) @@ -120,19 +123,54 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) return IRQ_HANDLED; } +static void palmas_gpio_id_detect(struct work_struct *work) +{ + int id; + struct palmas_usb *palmas_usb = container_of(to_delayed_work(work), + struct palmas_usb, + wq_detectid); + struct extcon_dev *edev = palmas_usb->edev; + + if (!palmas_usb->id_gpiod) + return; + + id = gpiod_get_value_cansleep(palmas_usb->id_gpiod); + + if (id) { + extcon_set_cable_state_(edev, EXTCON_USB_HOST, false); + dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); + } else { + extcon_set_cable_state_(edev, EXTCON_USB_HOST, true); + dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); + } +} + +static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb) +{ + struct palmas_usb *palmas_usb = _palmas_usb; + + queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid, + palmas_usb->sw_debounce_jiffies); + + return IRQ_HANDLED; +} + static void palmas_enable_irq(struct palmas_usb *palmas_usb) { palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, PALMAS_USB_VBUS_CTRL_SET, PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP); - palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, - PALMAS_USB_ID_CTRL_SET, PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP); + if (palmas_usb->enable_id_detection) { + palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, + PALMAS_USB_ID_CTRL_SET, + PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP); - palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, - PALMAS_USB_ID_INT_EN_HI_SET, - PALMAS_USB_ID_INT_EN_HI_SET_ID_GND | - PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT); + palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, + PALMAS_USB_ID_INT_EN_HI_SET, + PALMAS_USB_ID_INT_EN_HI_SET_ID_GND | + PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT); + } if (palmas_usb->enable_vbus_detection) palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb); @@ -171,20 +209,37 @@ static int palmas_usb_probe(struct platform_device *pdev) palmas_usb->wakeup = pdata->wakeup; } + palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", + GPIOD_IN); + if (IS_ERR(palmas_usb->id_gpiod)) { + dev_err(&pdev->dev, "failed to get id gpio\n"); + return PTR_ERR(palmas_usb->id_gpiod); + } + + if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) { + palmas_usb->enable_id_detection = false; + palmas_usb->enable_gpio_id_detection = true; + } + + if (palmas_usb->enable_gpio_id_detection) { + u32 debounce; + + if (of_property_read_u32(node, "debounce-delay-ms", &debounce)) + debounce = USB_GPIO_DEBOUNCE_MS; + + status = gpiod_set_debounce(palmas_usb->id_gpiod, + debounce * 1000); + if (status < 0) + palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce); + } + + INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect); + palmas->usb = palmas_usb; palmas_usb->palmas = palmas; palmas_usb->dev = &pdev->dev; - palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data, - PALMAS_ID_OTG_IRQ); - palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data, - PALMAS_ID_IRQ); - palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data, - PALMAS_VBUS_OTG_IRQ); - 
palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data, - PALMAS_VBUS_IRQ); - palmas_usb_wakeup(palmas, palmas_usb->wakeup); platform_set_drvdata(pdev, palmas_usb); @@ -195,7 +250,6 @@ static int palmas_usb_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to allocate extcon device\n"); return -ENOMEM; } - palmas_usb->edev->mutually_exclusive = mutually_exclusive; status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { @@ -204,6 +258,10 @@ static int palmas_usb_probe(struct platform_device *pdev) } if (palmas_usb->enable_id_detection) { + palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data, + PALMAS_ID_OTG_IRQ); + palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data, + PALMAS_ID_IRQ); status = devm_request_threaded_irq(palmas_usb->dev, palmas_usb->id_irq, NULL, palmas_id_irq_handler, @@ -215,9 +273,33 @@ static int palmas_usb_probe(struct platform_device *pdev) palmas_usb->id_irq, status); return status; } + } else if (palmas_usb->enable_gpio_id_detection) { + palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod); + if (palmas_usb->gpio_id_irq < 0) { + dev_err(&pdev->dev, "failed to get id irq\n"); + return palmas_usb->gpio_id_irq; + } + status = devm_request_threaded_irq(&pdev->dev, + palmas_usb->gpio_id_irq, + NULL, + palmas_gpio_id_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "palmas_usb_id", + palmas_usb); + if (status < 0) { + dev_err(&pdev->dev, + "failed to request handler for id irq\n"); + return status; + } } if (palmas_usb->enable_vbus_detection) { + palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data, + PALMAS_VBUS_OTG_IRQ); + palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data, + PALMAS_VBUS_IRQ); status = devm_request_threaded_irq(palmas_usb->dev, palmas_usb->vbus_irq, NULL, palmas_vbus_irq_handler, @@ -232,10 +314,21 @@ static int palmas_usb_probe(struct platform_device *pdev) } palmas_enable_irq(palmas_usb); + /* perform initial detection */ + palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); device_set_wakeup_capable(&pdev->dev, true); return 0; } +static int palmas_usb_remove(struct platform_device *pdev) +{ + struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); + + cancel_delayed_work_sync(&palmas_usb->wq_detectid); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int palmas_usb_suspend(struct device *dev) { @@ -246,6 +339,8 @@ static int palmas_usb_suspend(struct device *dev) enable_irq_wake(palmas_usb->vbus_irq); if (palmas_usb->enable_id_detection) enable_irq_wake(palmas_usb->id_irq); + if (palmas_usb->enable_gpio_id_detection) + enable_irq_wake(palmas_usb->gpio_id_irq); } return 0; } @@ -259,6 +354,8 @@ static int palmas_usb_resume(struct device *dev) disable_irq_wake(palmas_usb->vbus_irq); if (palmas_usb->enable_id_detection) disable_irq_wake(palmas_usb->id_irq); + if (palmas_usb->enable_gpio_id_detection) + disable_irq_wake(palmas_usb->gpio_id_irq); } return 0; }; @@ -276,6 +373,7 @@ static const struct of_device_id of_palmas_match_tbl[] = { static struct platform_driver palmas_usb_driver = { .probe = palmas_usb_probe, + .remove = palmas_usb_remove, .driver = { .name = "palmas-usb", .of_match_table = of_palmas_match_tbl, diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c index 92c939221..11592e980 100644 --- a/drivers/extcon/extcon-rt8973a.c +++ b/drivers/extcon/extcon-rt8973a.c @@ -693,7 +693,6 @@ MODULE_DEVICE_TABLE(i2c, rt8973a_i2c_id); static struct i2c_driver rt8973a_muic_i2c_driver = { .driver = { .name = 
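The palmas conversion above tries hardware debouncing first and only falls back to a software delay when the pin controller cannot help: a negative return from gpiod_set_debounce() is what arms sw_debounce_jiffies, which the GPIO ID interrupt handler then uses to defer the detection work. A minimal sketch of that fallback pattern, with a hypothetical "my_usb" structure standing in for the driver's own state:

#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct my_usb {
	struct gpio_desc *id_gpiod;
	unsigned long sw_debounce_jiffies;	/* 0 if debounced in hardware */
};

static void my_usb_setup_debounce(struct my_usb *usb, u32 debounce_ms)
{
	/* gpiod_set_debounce() takes microseconds. */
	if (gpiod_set_debounce(usb->id_gpiod, debounce_ms * 1000) < 0)
		/* Controller cannot debounce: delay the IRQ work instead. */
		usb->sw_debounce_jiffies = msecs_to_jiffies(debounce_ms);
}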
"rt8973a", - .owner = THIS_MODULE, .pm = &rt8973a_muic_pm_ops, .of_match_table = rt8973a_dt_match, }, diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index 817dece23..0ffefefa2 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c @@ -685,7 +685,6 @@ MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id); static struct i2c_driver sm5502_muic_i2c_driver = { .driver = { .name = "sm5502", - .owner = THIS_MODULE, .pm = &sm5502_muic_pm_ops, .of_match_table = sm5502_dt_match, }, diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c index a2a44536a..2b2fecffb 100644 --- a/drivers/extcon/extcon-usb-gpio.c +++ b/drivers/extcon/extcon-usb-gpio.c @@ -15,6 +15,7 @@ */ #include +#include #include #include #include diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index ca94f475f..8dd0af1d5 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -172,14 +172,6 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, int i, count = 0; struct extcon_dev *edev = dev_get_drvdata(dev); - if (edev->print_state) { - int ret = edev->print_state(edev, buf); - - if (ret >= 0) - return ret; - /* Use default if failed */ - } - if (edev->max_supported == 0) return sprintf(buf, "%u\n", edev->state); @@ -272,6 +264,9 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state) unsigned long flags; bool attached; + if (!edev) + return -EINVAL; + spin_lock_irqsave(&edev->lock, flags); if (edev->state != ((edev->state & ~mask) | (state & mask))) { @@ -345,6 +340,9 @@ EXPORT_SYMBOL_GPL(extcon_update_state); */ int extcon_set_state(struct extcon_dev *edev, u32 state) { + if (!edev) + return -EINVAL; + return extcon_update_state(edev, 0xffffffff, state); } EXPORT_SYMBOL_GPL(extcon_set_state); @@ -358,6 +356,9 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id) { int index; + if (!edev) + return -EINVAL; + index = find_cable_index_by_id(edev, id); if (index < 0) return index; @@ -402,6 +403,9 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, u32 state; int index; + if (!edev) + return -EINVAL; + index = find_cable_index_by_id(edev, id); if (index < 0) return index; @@ -444,6 +448,9 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { struct extcon_dev *sd; + if (!extcon_name) + return ERR_PTR(-EINVAL); + mutex_lock(&extcon_dev_list_lock); list_for_each_entry(sd, &extcon_dev_list, entry) { if (!strcmp(sd->name, extcon_name)) @@ -572,6 +579,9 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, unsigned long flags; int ret, idx; + if (!edev || !nb) + return -EINVAL; + idx = find_cable_index_by_id(edev, id); spin_lock_irqsave(&edev->lock, flags); @@ -594,6 +604,9 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, unsigned long flags; int ret, idx; + if (!edev || !nb) + return -EINVAL; + idx = find_cable_index_by_id(edev, id); spin_lock_irqsave(&edev->lock, flags); @@ -654,6 +667,9 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable) { struct extcon_dev *edev; + if (!supported_cable) + return ERR_PTR(-EINVAL); + edev = kzalloc(sizeof(*edev), GFP_KERNEL); if (!edev) return ERR_PTR(-ENOMEM); @@ -754,7 +770,7 @@ int extcon_dev_register(struct extcon_dev *edev) return ret; } - if (!edev->supported_cable) + if (!edev || !edev->supported_cable) return -EINVAL; for (; edev->supported_cable[index] != EXTCON_NONE; index++); @@ -960,6 +976,9 @@ void 
extcon_dev_unregister(struct extcon_dev *edev) { int index; + if (!edev) + return; + mutex_lock(&extcon_dev_list_lock); list_del(&edev->entry); mutex_unlock(&extcon_dev_list_lock); @@ -1066,6 +1085,9 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) struct device_node *node; struct extcon_dev *edev; + if (!dev) + return ERR_PTR(-EINVAL); + if (!dev->of_node) { dev_err(dev, "device does not have a device node entry\n"); return ERR_PTR(-EINVAL); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 99c69a320..665efca59 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -5,6 +5,9 @@ menu "Firmware Drivers" +config ARM_PSCI_FW + bool + config EDD tristate "BIOS Enhanced Disk Drive calls determine boot disk" depends on X86 @@ -136,6 +139,14 @@ config QCOM_SCM bool depends on ARM || ARM64 +config QCOM_SCM_32 + def_bool y + depends on QCOM_SCM && ARM + +config QCOM_SCM_64 + def_bool y + depends on QCOM_SCM && ARM64 + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 4a4b897f9..2ee83474a 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -1,6 +1,7 @@ # # Makefile for the linux kernel. # +obj-$(CONFIG_ARM_PSCI_FW) += psci.o obj-$(CONFIG_DMI) += dmi_scan.o obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_EDD) += edd.o @@ -12,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o -obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o +obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o +obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) obj-y += broadcom/ diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 54071c148..84533e02f 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -43,7 +43,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE config EFI_RUNTIME_MAP bool "Export efi runtime maps to sysfs" - depends on X86 && EFI && KEXEC + depends on X86 && EFI && KEXEC_CORE default y help Export efi runtime memory maps to /sys/firmware/efi/runtime-map. diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index e334a01cf..6b6548fda 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -5,10 +5,6 @@ /* error code which can't be mistaken for valid address */ #define EFI_ERROR (~0UL) -#undef memcpy -#undef memset -#undef memmove - void efi_char16_printk(efi_system_table_t *, efi_char16_t *); efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c new file mode 100644 index 000000000..42700f09a --- /dev/null +++ b/drivers/firmware/psci.c @@ -0,0 +1,382 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
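The extcon.c hunks above add argument validation to every exported entry point, so careless consumers now get -EINVAL (or an ERR_PTR) back instead of an oops inside the core. A consumer-side sketch of the calls being hardened; the device name and notifier are hypothetical, not from any driver in this patch:

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int my_usb_evt(struct notifier_block *nb, unsigned long state,
		      void *data)
{
	pr_info("USB cable %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block my_usb_nb = { .notifier_call = my_usb_evt };

static int my_consumer_init(const char *edev_name)
{
	/* A NULL name now fails cleanly instead of crashing in strcmp(). */
	struct extcon_dev *edev = extcon_get_extcon_dev(edev_name);

	if (IS_ERR_OR_NULL(edev))
		return -ENODEV;

	/* Likewise guarded against a NULL edev or notifier block. */
	return extcon_register_notifier(edev, EXTCON_USB, &my_usb_nb);
}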
+ * + * Copyright (C) 2015 ARM Limited + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * While a 64-bit OS can make calls with SMC32 calling conventions, for some + * calls it is necessary to use SMC64 to pass or return 64-bit values. For such + * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width) + * function ID. + */ +#ifdef CONFIG_64BIT +#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN64_##name +#else +#define PSCI_0_2_FN_NATIVE(name) PSCI_0_2_FN_##name +#endif + +/* + * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF + * calls to its resident CPU, so we must avoid issuing those. We never migrate + * a Trusted OS even if it claims to be capable of migration -- doing so will + * require cooperation with a Trusted OS driver. + */ +static int resident_cpu = -1; + +bool psci_tos_resident_on(int cpu) +{ + return cpu == resident_cpu; +} + +struct psci_operations psci_ops; + +typedef unsigned long (psci_fn)(unsigned long, unsigned long, + unsigned long, unsigned long); +asmlinkage psci_fn __invoke_psci_fn_hvc; +asmlinkage psci_fn __invoke_psci_fn_smc; +static psci_fn *invoke_psci_fn; + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_NOT_SUPPORTED: + return -EOPNOTSUPP; + case PSCI_RET_INVALID_PARAMS: + return -EINVAL; + case PSCI_RET_DENIED: + return -EPERM; + }; + + return -EINVAL; +} + +static u32 psci_get_version(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); +} + +static int psci_cpu_suspend(u32 state, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + err = invoke_psci_fn(fn, state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(u32 state) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + err = invoke_psci_fn(fn, state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_affinity_info(unsigned long target_affinity, + unsigned long lowest_affinity_level) +{ + return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO), + target_affinity, lowest_affinity_level, 0); +} + +static int psci_migrate_info_type(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0); +} + +static unsigned long psci_migrate_info_up_cpu(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU), + 0, 0, 0); +} + +static int get_set_conduit_method(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return -ENXIO; + } + + if (!strcmp("hvc", method)) { + invoke_psci_fn = __invoke_psci_fn_hvc; + } else if (!strcmp("smc", method)) { + invoke_psci_fn = __invoke_psci_fn_smc; + } else { + pr_warn("invalid 
\"method\" property: %s\n", method); + return -EINVAL; + } + return 0; +} + +static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); +} + +static void psci_sys_poweroff(void) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); +} + +/* + * Detect the presence of a resident Trusted OS which may cause CPU_OFF to + * return DENIED (which would be fatal). + */ +static void __init psci_init_migrate(void) +{ + unsigned long cpuid; + int type, cpu = -1; + + type = psci_ops.migrate_info_type(); + + if (type == PSCI_0_2_TOS_MP) { + pr_info("Trusted OS migration not required\n"); + return; + } + + if (type == PSCI_RET_NOT_SUPPORTED) { + pr_info("MIGRATE_INFO_TYPE not supported.\n"); + return; + } + + if (type != PSCI_0_2_TOS_UP_MIGRATE && + type != PSCI_0_2_TOS_UP_NO_MIGRATE) { + pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); + return; + } + + cpuid = psci_migrate_info_up_cpu(); + if (cpuid & ~MPIDR_HWID_BITMASK) { + pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n", + cpuid); + return; + } + + cpu = get_logical_index(cpuid); + resident_cpu = cpu >= 0 ? cpu : -1; + + pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); +} + +static void __init psci_0_2_set_functions(void) +{ + pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND); + psci_ops.cpu_suspend = psci_cpu_suspend; + + psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; + psci_ops.cpu_off = psci_cpu_off; + + psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON); + psci_ops.cpu_on = psci_cpu_on; + + psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE); + psci_ops.migrate = psci_migrate; + + psci_ops.affinity_info = psci_affinity_info; + + psci_ops.migrate_info_type = psci_migrate_info_type; + + arm_pm_restart = psci_sys_reset; + + pm_power_off = psci_sys_poweroff; +} + +/* + * Probe function for PSCI firmware versions >= 0.2 + */ +static int __init psci_probe(void) +{ + u32 ver = psci_get_version(); + + pr_info("PSCIv%d.%d detected in firmware.\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) { + pr_err("Conflicting PSCI version detected.\n"); + return -EINVAL; + } + + psci_0_2_set_functions(); + + psci_init_migrate(); + + return 0; +} + +typedef int (*psci_initcall_t)(const struct device_node *); + +/* + * PSCI init function for PSCI versions >=0.2 + * + * Probe based on PSCI PSCI_VERSION function + */ +static int __init psci_0_2_init(struct device_node *np) +{ + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + /* + * Starting with v0.2, the PSCI specification introduced a call + * (PSCI_VERSION) that allows probing the firmware version, so + * that PSCI function IDs and version specific initialization + * can be carried out according to the specific version reported + * by firmware + */ + err = psci_probe(); + +out_put_node: + of_node_put(np); + return err; +} + +/* + * PSCI < v0.2 get PSCI Function IDs via DT. 
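Once psci_0_2_set_functions() has run, everything version- and conduit-specific is hidden behind psci_ops; callers never see function IDs or whether SMC or HVC is in use. A rough consumer sketch, loosely modeled on the arch SMP bring-up path (the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/psci.h>

/*
 * Ask firmware to power up a secondary CPU and release it into
 * 'entry_point' (a physical address). Loosely what cpu_psci_cpu_boot()
 * does on arm64; illustrative only.
 */
static int my_boot_secondary(unsigned long target_mpidr,
			     unsigned long entry_point)
{
	if (!psci_ops.cpu_on)
		return -EOPNOTSUPP;

	return psci_ops.cpu_on(target_mpidr, entry_point);
}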
+ */ +static int __init psci_0_1_init(struct device_node *np) +{ + u32 id; + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + pr_info("Using PSCI v0.1 Function IDs from DT\n"); + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return err; +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", .data = psci_0_1_init}, + { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, + {}, +}; + +int __init psci_dt_init(void) +{ + struct device_node *np; + const struct of_device_id *matched_np; + psci_initcall_t init_fn; + + np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); + + if (!np) + return -ENODEV; + + init_fn = (psci_initcall_t)matched_np->data; + return init_fn(np); +} + +#ifdef CONFIG_ACPI +/* + * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's + * explicitly clarified in SBBR + */ +int __init psci_acpi_init(void) +{ + if (!acpi_psci_present()) { + pr_info("is not implemented in ACPI.\n"); + return -EOPNOTSUPP; + } + + pr_info("probing for conduit method from ACPI.\n"); + + if (acpi_psci_use_hvc()) + invoke_psci_fn = __invoke_psci_fn_hvc; + else + invoke_psci_fn = __invoke_psci_fn_smc; + + return psci_probe(); +} +#endif diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 1bd6f9c34..29e685066 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -24,7 +24,6 @@ #include #include -#include #include #include "qcom_scm.h" @@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd) * Flush the command buffer so that the secure world sees * the correct data. */ - __cpuc_flush_dcache_area((void *)cmd, cmd->len); - outer_flush_range(cmd_addr, cmd_addr + cmd->len); + secure_flush_area(cmd, cmd->len); ret = smc(cmd_addr); if (ret < 0) diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c new file mode 100644 index 000000000..bb6555f6d --- /dev/null +++ b/drivers/firmware/qcom_scm-64.c @@ -0,0 +1,63 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +/** + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the cold boot address of the cpus. Any cpu outside the supported + * range would be removed from the cpu present mask.
+ */ +int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENOTSUPP; +} + +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. + */ +int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENOTSUPP; +} + +/** + * qcom_scm_cpu_power_down() - Power down the cpu + * @flags - Flags to flush cache + * + * This is an end point to power down cpu. If there was a pending interrupt, + * the control would return from this function, otherwise, the cpu jumps to the + * warm boot entry point set for this cpu upon reset. + */ +void __qcom_scm_cpu_power_down(u32 flags) +{ +} + +int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + return -ENOTSUPP; +} + +int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +{ + return -ENOTSUPP; +} diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 8f1fe739c..8949b3f6f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -113,7 +113,6 @@ config GPIO_74XX_MMIO config GPIO_ALTERA tristate "Altera GPIO" depends on OF_GPIO - select GPIO_GENERIC select GPIOLIB_IRQCHIP help Say Y or M here to build support for the Altera PIO device. @@ -131,6 +130,7 @@ config GPIO_BRCMSTB default y if ARCH_BRCMSTB depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST) select GPIO_GENERIC + select GPIOLIB_IRQCHIP help Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs. @@ -172,6 +172,7 @@ config GPIO_ETRAXFS depends on CRIS || COMPILE_TEST depends on OF select GPIO_GENERIC + select GPIOLIB_IRQCHIP help Say yes here to support the GPIO controller on Axis ETRAX FS SoCs. @@ -308,7 +309,6 @@ config GPIO_MVEBU def_bool y depends on PLAT_ORION depends on OF - select GPIO_GENERIC select GENERIC_IRQ_CHIP config GPIO_MXC @@ -356,7 +356,7 @@ config GPIO_PXA config GPIO_RCAR tristate "Renesas R-Car GPIO" - depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) + depends on ARCH_SHMOBILE || COMPILE_TEST select GPIOLIB_IRQCHIP help Say yes here to support GPIO on Renesas R-Car SoCs. @@ -1005,6 +1005,12 @@ config GPIO_MC33880 SPI driver for Freescale MC33880 high-side/low-side switch. This provides GPIO interface supporting inputs and outputs. +config GPIO_ZX + bool "ZTE ZX GPIO support" + select GPIOLIB_IRQCHIP + help + Say yes here to support the GPIO device on ZTE ZX SoCs. 
+ endmenu menu "USB GPIO expanders" diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index f82cd678c..f79a7c482 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o +obj-$(CONFIG_ATH79) += gpio-ath79.o obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o @@ -116,3 +117,4 @@ obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o +obj-$(CONFIG_GPIO_ZX) += gpio-zx.o diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 07ba82317..903fcf4d0 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c @@ -59,13 +59,13 @@ static int devm_gpiod_match_array(struct device *dev, void *res, void *data) * automatically disposed on driver detach. See gpiod_get() for detailed * information about behavior and return values. */ -struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, +struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) { return devm_gpiod_get_index(dev, con_id, 0, flags); } -EXPORT_SYMBOL(__devm_gpiod_get); +EXPORT_SYMBOL(devm_gpiod_get); /** * devm_gpiod_get_optional - Resource-managed gpiod_get_optional() @@ -77,13 +77,13 @@ EXPORT_SYMBOL(__devm_gpiod_get); * are automatically disposed on driver detach. See gpiod_get_optional() for * detailed information about behavior and return values. */ -struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return devm_gpiod_get_index_optional(dev, con_id, 0, flags); } -EXPORT_SYMBOL(__devm_gpiod_get_optional); +EXPORT_SYMBOL(devm_gpiod_get_optional); /** * devm_gpiod_get_index - Resource-managed gpiod_get_index() @@ -96,7 +96,7 @@ EXPORT_SYMBOL(__devm_gpiod_get_optional); * automatically disposed on driver detach. See gpiod_get_index() for detailed * information about behavior and return values. */ -struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, +struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags) @@ -120,7 +120,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, return desc; } -EXPORT_SYMBOL(__devm_gpiod_get_index); +EXPORT_SYMBOL(devm_gpiod_get_index); /** * devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node @@ -182,10 +182,10 @@ EXPORT_SYMBOL(devm_get_gpiod_from_child); * gpiod_get_index_optional() for detailed information about behavior and * return values. 
*/ -struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev, +struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, - enum gpiod_flags flags) + enum gpiod_flags flags) { struct gpio_desc *desc; @@ -197,7 +197,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *de return desc; } -EXPORT_SYMBOL(__devm_gpiod_get_index_optional); +EXPORT_SYMBOL(devm_gpiod_get_index_optional); /** * devm_gpiod_get_array - Resource-managed gpiod_get_array() diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index 0763655cc..6ed7c0fb3 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c @@ -129,7 +129,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev) if (IS_ERR(dat)) return PTR_ERR(dat); - priv->flags = (unsigned)of_id->data; + priv->flags = (uintptr_t) of_id->data; err = bgpio_init(&priv->bgc, &pdev->dev, DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8), diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index d3fe6a677..984186ee5 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -305,15 +305,7 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev) irq_set_chip_and_handler(irq, &adp5588_irq_chip, handle_level_irq); irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - /* - * ARM needs us to explicitly flag the IRQ as VALID, - * once we do so, it will also set the noprobe. - */ - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif + irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE); } ret = request_threaded_irq(client->irq, diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 0f3d336d6..1b4494157 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc, return 0; } -static void altera_gpio_irq_edge_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_edge_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; @@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq, } -static void altera_gpio_irq_leveL_high_handler(unsigned int irq, - struct irq_desc *desc) +static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) { struct altera_gpio_chip *altera_gc; struct irq_chip *chip; @@ -338,9 +336,9 @@ static int altera_gpio_remove(struct platform_device *pdev) { struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev); - gpiochip_remove(&altera_gc->mmchip.gc); + of_mm_gpiochip_remove(&altera_gc->mmchip); - return -EIO; + return 0; } static const struct of_device_id altera_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c new file mode 100644 index 000000000..03b995304 --- /dev/null +++ b/drivers/gpio/gpio-ath79.c @@ -0,0 +1,204 @@ +/* + * Atheros AR71XX/AR724X/AR913X GPIO API support + * + * Copyright (C) 2010-2011 Jaiganesh Narayanan + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
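With the devres compatibility wrappers dropped above, devm_gpiod_get() and friends are plain exported functions whose flags argument is mandatory. A minimal probe-time usage sketch; the connection IDs "reset" and "led" are made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_probe_gpios(struct device *dev)
{
	struct gpio_desc *reset, *led;

	/* Required line: probing fails if "reset" is not described. */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* Optional line: NULL (not an error) when simply absent. */
	led = devm_gpiod_get_optional(dev, "led", GPIOD_OUT_HIGH);
	if (IS_ERR(led))
		return PTR_ERR(led);

	return 0;
}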
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void __iomem *ath79_gpio_base; +static u32 ath79_gpio_count; +static DEFINE_SPINLOCK(ath79_gpio_lock); + +static void __ath79_gpio_set_value(unsigned gpio, int value) +{ + void __iomem *base = ath79_gpio_base; + + if (value) + __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET); + else + __raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR); +} + +static int __ath79_gpio_get_value(unsigned gpio) +{ + return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1; +} + +static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset) +{ + return __ath79_gpio_get_value(offset); +} + +static void ath79_gpio_set_value(struct gpio_chip *chip, + unsigned offset, int value) +{ + __ath79_gpio_set_value(offset, value); +} + +static int ath79_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + void __iomem *base = ath79_gpio_base; + unsigned long flags; + + spin_lock_irqsave(&ath79_gpio_lock, flags); + + __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset), + base + AR71XX_GPIO_REG_OE); + + spin_unlock_irqrestore(&ath79_gpio_lock, flags); + + return 0; +} + +static int ath79_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + void __iomem *base = ath79_gpio_base; + unsigned long flags; + + spin_lock_irqsave(&ath79_gpio_lock, flags); + + if (value) + __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET); + else + __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR); + + __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset), + base + AR71XX_GPIO_REG_OE); + + spin_unlock_irqrestore(&ath79_gpio_lock, flags); + + return 0; +} + +static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + void __iomem *base = ath79_gpio_base; + unsigned long flags; + + spin_lock_irqsave(&ath79_gpio_lock, flags); + + __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset), + base + AR71XX_GPIO_REG_OE); + + spin_unlock_irqrestore(&ath79_gpio_lock, flags); + + return 0; +} + +static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + void __iomem *base = ath79_gpio_base; + unsigned long flags; + + spin_lock_irqsave(&ath79_gpio_lock, flags); + + if (value) + __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET); + else + __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR); + + __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset), + base + AR71XX_GPIO_REG_OE); + + spin_unlock_irqrestore(&ath79_gpio_lock, flags); + + return 0; +} + +static struct gpio_chip ath79_gpio_chip = { + .label = "ath79", + .get = ath79_gpio_get_value, + .set = ath79_gpio_set_value, + .direction_input = ath79_gpio_direction_input, + .direction_output = ath79_gpio_direction_output, + .base = 0, +}; + +static const struct of_device_id ath79_gpio_of_match[] = { + { .compatible = "qca,ar7100-gpio" }, + { .compatible = "qca,ar9340-gpio" }, + {}, +}; + +static int ath79_gpio_probe(struct platform_device *pdev) +{ + struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data; + struct device_node *np = pdev->dev.of_node; + struct resource *res; + bool oe_inverted; + int err; + + if (np) { + err = of_property_read_u32(np, "ngpios", &ath79_gpio_count); + if (err) { + dev_err(&pdev->dev, "ngpios property is not valid\n"); + return err; + } + if (ath79_gpio_count >= 32) { + dev_err(&pdev->dev, "ngpios must be less than 32\n"); + return -EINVAL; 
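One caveat in the probe code just below: platform_get_resource() can return NULL, yet res->start is dereferenced without a check. The usual defensive idiom, shown here only as a sketch (and assuming the driver owns the region exclusively, since it also requests it), is devm_ioremap_resource(), which validates, requests, and maps the resource in one step:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *my_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Returns an ERR_PTR for a NULL or invalid resource. */
	return devm_ioremap_resource(&pdev->dev, res);
}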
+ } + oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio"); + } else if (pdata) { + ath79_gpio_count = pdata->ngpios; + oe_inverted = pdata->oe_inverted; + } else { + dev_err(&pdev->dev, "No DT node or platform data found\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ath79_gpio_base = devm_ioremap_nocache( + &pdev->dev, res->start, resource_size(res)); + if (!ath79_gpio_base) + return -ENOMEM; + + ath79_gpio_chip.dev = &pdev->dev; + ath79_gpio_chip.ngpio = ath79_gpio_count; + if (oe_inverted) { + ath79_gpio_chip.direction_input = ar934x_gpio_direction_input; + ath79_gpio_chip.direction_output = ar934x_gpio_direction_output; + } + + err = gpiochip_add(&ath79_gpio_chip); + if (err) { + dev_err(&pdev->dev, + "cannot add AR71xx GPIO chip, error=%d", err); + return err; + } + + return 0; +} + +static struct platform_driver ath79_gpio_driver = { + .driver = { + .name = "ath79-gpio", + .of_match_table = ath79_gpio_of_match, + }, + .probe = ath79_gpio_probe, +}; + +module_platform_driver(ath79_gpio_driver); diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 40343fa92..33a1f9779 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -433,12 +433,12 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void bcm_kona_gpio_irq_handler(struct irq_desc *desc) { void __iomem *reg_base; int bit, bank_id; unsigned long sta; - struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq); + struct bcm_kona_gpio_bank *bank = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); @@ -525,11 +525,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, return ret; irq_set_lockdep_class(irq, &gpio_lock_class); irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else irq_set_noprobe(irq); -#endif return 0; } @@ -644,17 +640,6 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret); goto err_irq_domain; } - for (i = 0; i < chip->ngpio; i++) { - int irq = bcm_kona_gpio_to_irq(chip, i); - irq_set_lockdep_class(irq, &gpio_lock_class); - irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, - handle_simple_irq); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - } for (i = 0; i < kona_gpio->num_bank; i++) { bank = &kona_gpio->banks[i]; irq_set_chained_handler_and_data(bank->irq, diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 4630a8133..4c64627c6 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -17,6 +17,10 @@ #include #include #include +#include +#include +#include +#include #define GIO_BANK_SIZE 0x20 #define GIO_ODEN(bank) (((bank) * GIO_BANK_SIZE) + 0x00) @@ -34,14 +38,18 @@ struct brcmstb_gpio_bank { struct bgpio_chip bgc; struct brcmstb_gpio_priv *parent_priv; u32 width; + struct irq_chip irq_chip; }; struct brcmstb_gpio_priv { struct list_head bank_list; void __iomem *reg_base; - int num_banks; struct platform_device *pdev; + int parent_irq; int gpio_base; + bool can_wake; + int parent_wake_irq; + struct notifier_block reboot_notifier; }; #define MAX_GPIO_PER_BANK 32 @@ -63,6 +71,203 @@ brcmstb_gpio_gc_to_priv(struct gpio_chip *gc) return bank->parent_priv; } +static void 
brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank, + unsigned int offset, bool enable) +{ + struct bgpio_chip *bgc = &bank->bgc; + struct brcmstb_gpio_priv *priv = bank->parent_priv; + u32 mask = bgc->pin2mask(bgc, offset); + u32 imask; + unsigned long flags; + + spin_lock_irqsave(&bgc->lock, flags); + imask = bgc->read_reg(priv->reg_base + GIO_MASK(bank->id)); + if (enable) + imask |= mask; + else + imask &= ~mask; + bgc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask); + spin_unlock_irqrestore(&bgc->lock, flags); +} + +/* -------------------- IRQ chip functions -------------------- */ + +static void brcmstb_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc); + + brcmstb_gpio_set_imask(bank, d->hwirq, false); +} + +static void brcmstb_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc); + + brcmstb_gpio_set_imask(bank, d->hwirq, true); +} + +static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc); + struct brcmstb_gpio_priv *priv = bank->parent_priv; + u32 mask = BIT(d->hwirq); + u32 edge_insensitive, iedge_insensitive; + u32 edge_config, iedge_config; + u32 level, ilevel; + unsigned long flags; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + level = 0; + edge_config = 0; + edge_insensitive = 0; + break; + case IRQ_TYPE_LEVEL_HIGH: + level = mask; + edge_config = 0; + edge_insensitive = 0; + break; + case IRQ_TYPE_EDGE_FALLING: + level = 0; + edge_config = 0; + edge_insensitive = 0; + break; + case IRQ_TYPE_EDGE_RISING: + level = 0; + edge_config = mask; + edge_insensitive = 0; + break; + case IRQ_TYPE_EDGE_BOTH: + level = 0; + edge_config = 0; /* don't care, but want known value */ + edge_insensitive = mask; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&bank->bgc.lock, flags); + + iedge_config = bank->bgc.read_reg(priv->reg_base + + GIO_EC(bank->id)) & ~mask; + iedge_insensitive = bank->bgc.read_reg(priv->reg_base + + GIO_EI(bank->id)) & ~mask; + ilevel = bank->bgc.read_reg(priv->reg_base + + GIO_LEVEL(bank->id)) & ~mask; + + bank->bgc.write_reg(priv->reg_base + GIO_EC(bank->id), + iedge_config | edge_config); + bank->bgc.write_reg(priv->reg_base + GIO_EI(bank->id), + iedge_insensitive | edge_insensitive); + bank->bgc.write_reg(priv->reg_base + GIO_LEVEL(bank->id), + ilevel | level); + + spin_unlock_irqrestore(&bank->bgc.lock, flags); + return 0; +} + +static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv, + unsigned int enable) +{ + int ret = 0; + + /* + * Only enable wake IRQ once for however many hwirqs can wake + * since they all use the same wake IRQ. Mask will be set + * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag. + */ + if (enable) + ret = enable_irq_wake(priv->parent_wake_irq); + else + ret = disable_irq_wake(priv->parent_wake_irq); + if (ret) + dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n", + enable ? 
"enable" : "disable"); + return ret; +} + +static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); + + return brcmstb_gpio_priv_set_wake(priv, enable); +} + +static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data) +{ + struct brcmstb_gpio_priv *priv = data; + + if (!priv || irq != priv->parent_wake_irq) + return IRQ_NONE; + pm_wakeup_event(&priv->pdev->dev, 0); + return IRQ_HANDLED; +} + +static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank) +{ + struct brcmstb_gpio_priv *priv = bank->parent_priv; + struct irq_domain *irq_domain = bank->bgc.gc.irqdomain; + void __iomem *reg_base = priv->reg_base; + unsigned long status; + unsigned long flags; + + spin_lock_irqsave(&bank->bgc.lock, flags); + while ((status = bank->bgc.read_reg(reg_base + GIO_STAT(bank->id)) & + bank->bgc.read_reg(reg_base + GIO_MASK(bank->id)))) { + int bit; + + for_each_set_bit(bit, &status, 32) { + u32 stat = bank->bgc.read_reg(reg_base + + GIO_STAT(bank->id)); + if (bit >= bank->width) + dev_warn(&priv->pdev->dev, + "IRQ for invalid GPIO (bank=%d, offset=%d)\n", + bank->id, bit); + bank->bgc.write_reg(reg_base + GIO_STAT(bank->id), + stat | BIT(bit)); + generic_handle_irq(irq_find_mapping(irq_domain, bit)); + } + } + spin_unlock_irqrestore(&bank->bgc.lock, flags); +} + +/* Each UPG GIO block has one IRQ for all banks */ +static void brcmstb_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct list_head *pos; + + /* Interrupts weren't properly cleared during probe */ + BUG_ON(!priv || !chip); + + chained_irq_enter(chip, desc); + list_for_each(pos, &priv->bank_list) { + struct brcmstb_gpio_bank *bank = + list_entry(pos, struct brcmstb_gpio_bank, node); + brcmstb_gpio_irq_bank_handler(bank); + } + chained_irq_exit(chip, desc); +} + +static int brcmstb_gpio_reboot(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct brcmstb_gpio_priv *priv = + container_of(nb, struct brcmstb_gpio_priv, reboot_notifier); + + /* Enable GPIO for S5 cold boot */ + if (action == SYS_POWER_OFF) + brcmstb_gpio_priv_set_wake(priv, 1); + + return NOTIFY_DONE; +} + /* Make sure that the number of banks matches up between properties */ static int brcmstb_gpio_sanity_check_banks(struct device *dev, struct device_node *np, struct resource *res) @@ -100,7 +305,13 @@ static int brcmstb_gpio_remove(struct platform_device *pdev) bank = list_entry(pos, struct brcmstb_gpio_bank, node); ret = bgpio_remove(&bank->bgc); if (ret) - dev_err(&pdev->dev, "gpiochip_remove fail in cleanup"); + dev_err(&pdev->dev, "gpiochip_remove fail in cleanup\n"); + } + if (priv->reboot_notifier.notifier_call) { + ret = unregister_reboot_notifier(&priv->reboot_notifier); + if (ret) + dev_err(&pdev->dev, + "failed to unregister reboot notifier\n"); } return ret; } @@ -121,7 +332,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc, return -EINVAL; offset = gpiospec->args[0] - (gc->base - priv->gpio_base); - if (offset >= gc->ngpio) + if (offset >= gc->ngpio || offset < 0) return -EINVAL; if (unlikely(offset >= bank->width)) { @@ -136,6 +347,65 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc, return offset; } +/* Before calling, must have bank->parent_irq set and gpiochip registered */ +static int 
brcmstb_gpio_irq_setup(struct platform_device *pdev, + struct brcmstb_gpio_bank *bank) +{ + struct brcmstb_gpio_priv *priv = bank->parent_priv; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + bank->irq_chip.name = dev_name(dev); + bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask; + bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask; + bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type; + + /* Ensures that all non-wakeup IRQs are disabled at suspend */ + bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND; + + if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake && + of_property_read_bool(np, "wakeup-source")) { + priv->parent_wake_irq = platform_get_irq(pdev, 1); + if (priv->parent_wake_irq < 0) { + dev_warn(dev, + "Couldn't get wake IRQ - GPIOs will not be able to wake from sleep"); + } else { + int err; + + /* + * Set wakeup capability before requesting wakeup + * interrupt, so we can process boot-time "wakeups" + * (e.g., from S5 cold boot) + */ + device_set_wakeup_capable(dev, true); + device_wakeup_enable(dev); + err = devm_request_irq(dev, priv->parent_wake_irq, + brcmstb_gpio_wake_irq_handler, 0, + "brcmstb-gpio-wake", priv); + + if (err < 0) { + dev_err(dev, "Couldn't request wake IRQ"); + return err; + } + + priv->reboot_notifier.notifier_call = + brcmstb_gpio_reboot; + register_reboot_notifier(&priv->reboot_notifier); + priv->can_wake = true; + } + } + + if (priv->can_wake) + bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake; + + gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0, + handle_simple_irq, IRQ_TYPE_NONE); + gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip, + priv->parent_irq, brcmstb_gpio_irq_handler); + + return 0; +} + static int brcmstb_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -146,6 +416,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) struct property *prop; const __be32 *p; u32 bank_width; + int num_banks = 0; int err; static int gpio_base; @@ -164,6 +435,16 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) priv->reg_base = reg_base; priv->pdev = pdev; + if (of_property_read_bool(np, "interrupt-controller")) { + priv->parent_irq = platform_get_irq(pdev, 0); + if (priv->parent_irq <= 0) { + dev_err(dev, "Couldn't get IRQ"); + return -ENOENT; + } + } else { + priv->parent_irq = -ENOENT; + } + if (brcmstb_gpio_sanity_check_banks(dev, np, res)) return -EINVAL; @@ -180,7 +461,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) } bank->parent_priv = priv; - bank->id = priv->num_banks; + bank->id = num_banks; if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) { dev_err(dev, "Invalid bank width %d\n", bank_width); goto fail; @@ -212,6 +493,12 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) /* not all ngpio lines are valid, will use bank width later */ gc->ngpio = MAX_GPIO_PER_BANK; + /* + * Mask all interrupts by default, since wakeup interrupts may + * be retained from S5 cold boot + */ + bank->bgc.write_reg(reg_base + GIO_MASK(bank->id), 0); + err = gpiochip_add(gc); if (err) { dev_err(dev, "Could not add gpiochip for bank %d\n", @@ -219,17 +506,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) goto fail; } gpio_base += gc->ngpio; + + if (priv->parent_irq > 0) { + err = brcmstb_gpio_irq_setup(pdev, bank); + if (err) + goto fail; + } + dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id, gc->base, gc->ngpio, bank->width); /* Everything looks good, so add bank to list */ list_add(&bank->node, 
&priv->bank_list); - priv->num_banks++; + num_banks++; } dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", - priv->num_banks, priv->gpio_base, gpio_base - 1); + num_banks, priv->gpio_base, gpio_base - 1); return 0; diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index c246ac3dd..5e7153888 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -65,11 +65,11 @@ static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio) return ptr; } -static inline struct davinci_gpio_regs __iomem *irq2regs(int irq) +static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d) { struct davinci_gpio_regs __iomem *g; - g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq); + g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d); return g; } @@ -287,7 +287,7 @@ static int davinci_gpio_probe(struct platform_device *pdev) static void gpio_irq_disable(struct irq_data *d) { - struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); + struct davinci_gpio_regs __iomem *g = irq2regs(d); u32 mask = (u32) irq_data_get_irq_handler_data(d); writel_relaxed(mask, &g->clr_falling); @@ -296,7 +296,7 @@ static void gpio_irq_disable(struct irq_data *d) static void gpio_irq_enable(struct irq_data *d) { - struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); + struct davinci_gpio_regs __iomem *g = irq2regs(d); u32 mask = (u32) irq_data_get_irq_handler_data(d); unsigned status = irqd_get_trigger_type(d); @@ -326,9 +326,9 @@ static struct irq_chip gpio_irqchip = { .flags = IRQCHIP_SET_TYPE_MASKED, }; -static void -gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); struct davinci_gpio_regs __iomem *g; u32 mask = 0xffff; struct davinci_gpio_controller *d; @@ -396,7 +396,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) struct davinci_gpio_regs __iomem *g; u32 mask; - d = (struct davinci_gpio_controller *)data->handler_data; + d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); g = (struct davinci_gpio_regs __iomem *)d->regs; mask = __gpio_mask(data->irq - d->gpio_irq); @@ -422,7 +422,6 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq, irq_set_irq_type(irq, IRQ_TYPE_NONE); irq_set_chip_data(irq, (__force void *)g); irq_set_handler_data(irq, (void *)__gpio_mask(hw)); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -545,7 +544,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) chips[0].chip.to_irq = gpio_to_irq_unbanked; chips[0].gpio_irq = bank_irq; chips[0].gpio_unbanked = pdata->gpio_unbanked; - binten = BIT(0); + binten = GENMASK(pdata->gpio_unbanked / 16, 0); /* AINTC handles mask/unmask; GPIO handles triggering */ irq = bank_irq; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 55fa9853a..fcd5b0acf 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -147,9 +147,9 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio) return ret; } -static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) +static void dwapb_irq_handler(struct irq_desc *desc) { - struct dwapb_gpio *gpio = irq_get_handler_data(irq); + struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); dwapb_do_irq(gpio); diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index fbf287307..6bca1e125 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -31,7 
+31,6 @@ #include #include #include -#include struct em_gio_priv { void __iomem *base0; @@ -262,7 +261,6 @@ static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int irq, irq_set_chip_data(irq, h->host_data); irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID); /* kill me now */ return 0; } @@ -273,13 +271,12 @@ static const struct irq_domain_ops em_gio_irq_domain_ops = { static int em_gio_probe(struct platform_device *pdev) { - struct gpio_em_config pdata_dt; - struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev); struct em_gio_priv *p; struct resource *io[2], *irq[2]; struct gpio_chip *gpio_chip; struct irq_chip *irq_chip; const char *name = dev_name(&pdev->dev); + unsigned int ngpios; int ret; p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); @@ -319,18 +316,10 @@ static int em_gio_probe(struct platform_device *pdev) goto err0; } - if (!pdata) { - memset(&pdata_dt, 0, sizeof(pdata_dt)); - pdata = &pdata_dt; - - if (of_property_read_u32(pdev->dev.of_node, "ngpios", - &pdata->number_of_pins)) { - dev_err(&pdev->dev, "Missing ngpios OF property\n"); - ret = -EINVAL; - goto err0; - } - - pdata->gpio_base = -1; + if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) { + dev_err(&pdev->dev, "Missing ngpios OF property\n"); + ret = -EINVAL; + goto err0; } gpio_chip = &p->gpio_chip; @@ -345,8 +334,8 @@ static int em_gio_probe(struct platform_device *pdev) gpio_chip->label = name; gpio_chip->dev = &pdev->dev; gpio_chip->owner = THIS_MODULE; - gpio_chip->base = pdata->gpio_base; - gpio_chip->ngpio = pdata->number_of_pins; + gpio_chip->base = -1; + gpio_chip->ngpio = ngpios; irq_chip = &p->irq_chip; irq_chip->name = name; @@ -357,9 +346,7 @@ static int em_gio_probe(struct platform_device *pdev) irq_chip->irq_release_resources = em_gio_irq_relres; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, - pdata->number_of_pins, - pdata->irq_base, + p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0, &em_gio_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; @@ -387,12 +374,6 @@ static int em_gio_probe(struct platform_device *pdev) goto err1; } - if (pdata->pctl_name) { - ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0, - gpio_chip->base, gpio_chip->ngpio); - if (ret < 0) - dev_warn(&pdev->dev, "failed to add pin range\n"); - } return 0; err1: diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 45684f36d..3e3947b35 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) EP93XX_GPIO_REG(int_debounce_register_offset[port])); } -static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) { unsigned char status; int i; @@ -100,13 +100,14 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) } } -static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) { /* * map discontiguous hw irq range to continuous sw irq range: * * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7}) */ + unsigned int irq = irq_desc_get_irq(desc); int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */ int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx; @@ -208,7 +209,7 @@ static int 
ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) return -EINVAL; } - __irq_set_handler_locked(d->irq, handler); + irq_set_handler_locked(d, handler); gpio_int_enabled[port] |= port_mask; @@ -234,7 +235,7 @@ static void ep93xx_gpio_init_irq(void) gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip, handle_level_irq); - set_irq_flags(gpio_irq, IRQF_VALID); + irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST); } irq_set_chained_handler(IRQ_EP93XX_GPIO_AB, diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c index 28071f4a5..2ffcd9fdd 100644 --- a/drivers/gpio/gpio-etraxfs.c +++ b/drivers/gpio/gpio-etraxfs.c @@ -1,8 +1,10 @@ #include #include #include +#include #include #include +#include #include #include @@ -13,6 +15,7 @@ #define ETRAX_FS_rw_intr_mask 16 #define ETRAX_FS_rw_ack_intr 20 #define ETRAX_FS_r_intr 24 +#define ETRAX_FS_r_masked_intr 28 #define ETRAX_FS_rw_pb_dout 32 #define ETRAX_FS_r_pb_din 36 #define ETRAX_FS_rw_pb_oe 40 @@ -26,6 +29,48 @@ #define ETRAX_FS_r_pe_din 84 #define ETRAX_FS_rw_pe_oe 88 +#define ARTPEC3_r_pa_din 0 +#define ARTPEC3_rw_pa_dout 4 +#define ARTPEC3_rw_pa_oe 8 +#define ARTPEC3_r_pb_din 44 +#define ARTPEC3_rw_pb_dout 48 +#define ARTPEC3_rw_pb_oe 52 +#define ARTPEC3_r_pc_din 88 +#define ARTPEC3_rw_pc_dout 92 +#define ARTPEC3_rw_pc_oe 96 +#define ARTPEC3_r_pd_din 116 +#define ARTPEC3_rw_intr_cfg 120 +#define ARTPEC3_rw_intr_pins 124 +#define ARTPEC3_rw_intr_mask 128 +#define ARTPEC3_rw_ack_intr 132 +#define ARTPEC3_r_masked_intr 140 + +#define GIO_CFG_OFF 0 +#define GIO_CFG_HI 1 +#define GIO_CFG_LO 2 +#define GIO_CFG_SET 3 +#define GIO_CFG_POSEDGE 5 +#define GIO_CFG_NEGEDGE 6 +#define GIO_CFG_ANYEDGE 7 + +struct etraxfs_gpio_info; + +struct etraxfs_gpio_block { + spinlock_t lock; + u32 mask; + u32 cfg; + u32 pins; + unsigned int group[8]; + + void __iomem *regs; + const struct etraxfs_gpio_info *info; +}; + +struct etraxfs_gpio_chip { + struct bgpio_chip bgc; + struct etraxfs_gpio_block *block; +}; + struct etraxfs_gpio_port { const char *label; unsigned int oe; @@ -37,6 +82,12 @@ struct etraxfs_gpio_port { struct etraxfs_gpio_info { unsigned int num_ports; const struct etraxfs_gpio_port *ports; + + unsigned int rw_ack_intr; + unsigned int rw_intr_mask; + unsigned int rw_intr_cfg; + unsigned int rw_intr_pins; + unsigned int r_masked_intr; }; static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = { @@ -80,8 +131,56 @@ static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = { static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = { .num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports), .ports = etraxfs_gpio_etraxfs_ports, + .rw_ack_intr = ETRAX_FS_rw_ack_intr, + .rw_intr_mask = ETRAX_FS_rw_intr_mask, + .rw_intr_cfg = ETRAX_FS_rw_intr_cfg, + .r_masked_intr = ETRAX_FS_r_masked_intr, +}; + +static const struct etraxfs_gpio_port etraxfs_gpio_artpec3_ports[] = { + { + .label = "A", + .ngpio = 32, + .oe = ARTPEC3_rw_pa_oe, + .dout = ARTPEC3_rw_pa_dout, + .din = ARTPEC3_r_pa_din, + }, + { + .label = "B", + .ngpio = 32, + .oe = ARTPEC3_rw_pb_oe, + .dout = ARTPEC3_rw_pb_dout, + .din = ARTPEC3_r_pb_din, + }, + { + .label = "C", + .ngpio = 16, + .oe = ARTPEC3_rw_pc_oe, + .dout = ARTPEC3_rw_pc_dout, + .din = ARTPEC3_r_pc_din, + }, + { + .label = "D", + .ngpio = 32, + .din = ARTPEC3_r_pd_din, + }, +}; + +static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = { + .num_ports = ARRAY_SIZE(etraxfs_gpio_artpec3_ports), + .ports = 
etraxfs_gpio_artpec3_ports, + .rw_ack_intr = ARTPEC3_rw_ack_intr, + .rw_intr_mask = ARTPEC3_rw_intr_mask, + .rw_intr_cfg = ARTPEC3_rw_intr_cfg, + .r_masked_intr = ARTPEC3_r_masked_intr, + .rw_intr_pins = ARTPEC3_rw_intr_pins, }; +static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc) +{ + return gc->label[0] - 'A'; +} + static int etraxfs_gpio_of_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags) @@ -90,7 +189,7 @@ static int etraxfs_gpio_of_xlate(struct gpio_chip *gc, * Port numbers are A to E, and the properties are integers, so we * specify them as 0xA - 0xE. */ - if (gc->label[0] - 'A' + 0xA != gpiospec->args[2]) + if (etraxfs_gpio_chip_to_port(gc) + 0xA != gpiospec->args[2]) return -EINVAL; return of_gpio_simple_xlate(gc, gpiospec, flags); @@ -101,24 +200,174 @@ static const struct of_device_id etraxfs_gpio_of_table[] = { .compatible = "axis,etraxfs-gio", .data = &etraxfs_gpio_etraxfs, }, + { + .compatible = "axis,artpec3-gio", + .data = &etraxfs_gpio_artpec3, + }, {}, }; +static unsigned int etraxfs_gpio_to_group_irq(unsigned int gpio) +{ + return gpio % 8; +} + +static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip, + unsigned int gpio) +{ + return 4 * etraxfs_gpio_chip_to_port(&chip->bgc.gc) + gpio / 8; +} + +static void etraxfs_gpio_irq_ack(struct irq_data *d) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + + writel(BIT(grpirq), block->regs + block->info->rw_ack_intr); +} + +static void etraxfs_gpio_irq_mask(struct irq_data *d) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + + spin_lock(&block->lock); + block->mask &= ~BIT(grpirq); + writel(block->mask, block->regs + block->info->rw_intr_mask); + spin_unlock(&block->lock); +} + +static void etraxfs_gpio_irq_unmask(struct irq_data *d) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + + spin_lock(&block->lock); + block->mask |= BIT(grpirq); + writel(block->mask, block->regs + block->info->rw_intr_mask); + spin_unlock(&block->lock); +} + +static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + u32 cfg; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + cfg = GIO_CFG_POSEDGE; + break; + case IRQ_TYPE_EDGE_FALLING: + cfg = GIO_CFG_NEGEDGE; + break; + case IRQ_TYPE_EDGE_BOTH: + cfg = GIO_CFG_ANYEDGE; + break; + case IRQ_TYPE_LEVEL_LOW: + cfg = GIO_CFG_LO; + break; + case IRQ_TYPE_LEVEL_HIGH: + cfg = GIO_CFG_HI; + break; + default: + return -EINVAL; + } + + spin_lock(&block->lock); + block->cfg &= ~(0x7 << (grpirq * 3)); + block->cfg |= (cfg << (grpirq * 3)); + writel(block->cfg, block->regs + block->info->rw_intr_cfg); + spin_unlock(&block->lock); + + return 0; +} + +static int etraxfs_gpio_irq_request_resources(struct irq_data *d) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + int ret = -EBUSY; + + spin_lock(&block->lock); + if (block->group[grpirq]) + goto 
out; + + ret = gpiochip_lock_as_irq(&chip->bgc.gc, d->hwirq); + if (ret) + goto out; + + block->group[grpirq] = d->irq; + if (block->info->rw_intr_pins) { + unsigned int pin = etraxfs_gpio_to_group_pin(chip, d->hwirq); + + block->pins &= ~(0xf << (grpirq * 4)); + block->pins |= (pin << (grpirq * 4)); + + writel(block->pins, block->regs + block->info->rw_intr_pins); + } + +out: + spin_unlock(&block->lock); + return ret; +} + +static void etraxfs_gpio_irq_release_resources(struct irq_data *d) +{ + struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct etraxfs_gpio_block *block = chip->block; + unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq); + + spin_lock(&block->lock); + block->group[grpirq] = 0; + gpiochip_unlock_as_irq(&chip->bgc.gc, d->hwirq); + spin_unlock(&block->lock); +} + +static struct irq_chip etraxfs_gpio_irq_chip = { + .name = "gpio-etraxfs", + .irq_ack = etraxfs_gpio_irq_ack, + .irq_mask = etraxfs_gpio_irq_mask, + .irq_unmask = etraxfs_gpio_irq_unmask, + .irq_set_type = etraxfs_gpio_irq_set_type, + .irq_request_resources = etraxfs_gpio_irq_request_resources, + .irq_release_resources = etraxfs_gpio_irq_release_resources, +}; + +static irqreturn_t etraxfs_gpio_interrupt(int irq, void *dev_id) +{ + struct etraxfs_gpio_block *block = dev_id; + unsigned long intr = readl(block->regs + block->info->r_masked_intr); + int bit; + + for_each_set_bit(bit, &intr, 8) + generic_handle_irq(block->group[bit]); + + return IRQ_RETVAL(intr & 0xff); +} + static int etraxfs_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct etraxfs_gpio_info *info; const struct of_device_id *match; - struct bgpio_chip *chips; - struct resource *res; + struct etraxfs_gpio_block *block; + struct etraxfs_gpio_chip *chips; + struct resource *res, *irq; + bool allportsirq = false; void __iomem *regs; int ret; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(dev, res); - if (!regs) - return -ENOMEM; + if (IS_ERR(regs)) + return PTR_ERR(regs); match = of_match_node(etraxfs_gpio_of_table, dev->of_node); if (!match) @@ -130,19 +379,57 @@ static int etraxfs_gpio_probe(struct platform_device *pdev) if (!chips) return -ENOMEM; + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) + return -EINVAL; + + block = devm_kzalloc(dev, sizeof(*block), GFP_KERNEL); + if (!block) + return -ENOMEM; + + spin_lock_init(&block->lock); + + block->regs = regs; + block->info = info; + + writel(0, block->regs + info->rw_intr_mask); + writel(0, block->regs + info->rw_intr_cfg); + if (info->rw_intr_pins) { + allportsirq = true; + writel(0, block->regs + info->rw_intr_pins); + } + + ret = devm_request_irq(dev, irq->start, etraxfs_gpio_interrupt, + IRQF_SHARED, dev_name(dev), block); + if (ret) { + dev_err(dev, "Unable to request irq %d\n", ret); + return ret; + } + for (i = 0; i < info->num_ports; i++) { - struct bgpio_chip *bgc = &chips[i]; + struct etraxfs_gpio_chip *chip = &chips[i]; + struct bgpio_chip *bgc = &chip->bgc; const struct etraxfs_gpio_port *port = &info->ports[i]; + unsigned long flags = BGPIOF_READ_OUTPUT_REG_SET; + void __iomem *dat = regs + port->din; + void __iomem *set = regs + port->dout; + void __iomem *dirout = regs + port->oe; + + chip->block = block; + + if (dirout == set) { + dirout = set = NULL; + flags = BGPIOF_NO_OUTPUT; + } ret = bgpio_init(bgc, dev, 4, - regs + port->din, /* dat */ - regs + port->dout, /* set */ - NULL, /* clr */ - regs + port->oe, /* dirout */ - NULL, /* dirin */ - 
BGPIOF_UNREADABLE_REG_SET); - if (ret) - return ret; + dat, set, NULL, dirout, NULL, + flags); + if (ret) { + dev_err(dev, "Unable to init port %s\n", + port->label); + continue; + } bgc->gc.ngpio = port->ngpio; bgc->gc.label = port->label; @@ -152,9 +439,21 @@ static int etraxfs_gpio_probe(struct platform_device *pdev) bgc->gc.of_xlate = etraxfs_gpio_of_xlate; ret = gpiochip_add(&bgc->gc); - if (ret) + if (ret) { dev_err(dev, "Unable to register port %s\n", bgc->gc.label); + continue; + } + + if (i > 0 && !allportsirq) + continue; + + ret = gpiochip_irqchip_add(&bgc->gc, &etraxfs_gpio_irq_chip, 0, + handle_level_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(dev, "Unable to add irqchip to port %s\n", + bgc->gc.label); + } } return 0; diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index 9bda3727f..a3f07537f 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -153,6 +153,10 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio)); } +static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val) +{ +} + static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct bgpio_chip *bgc = to_bgpio_chip(gc); @@ -279,6 +283,12 @@ static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio) return 0; } +static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio, + int val) +{ + return -EINVAL; +} + static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { @@ -302,6 +312,14 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) return 0; } +static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio) +{ + struct bgpio_chip *bgc = to_bgpio_chip(gc); + + return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ? + GPIOF_DIR_OUT : GPIOF_DIR_IN; +} + static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct bgpio_chip *bgc = to_bgpio_chip(gc); @@ -351,6 +369,14 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val) return 0; } +static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio) +{ + struct bgpio_chip *bgc = to_bgpio_chip(gc); + + return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ? 
+ GPIOF_DIR_IN : GPIOF_DIR_OUT; +} + static int bgpio_setup_accessors(struct device *dev, struct bgpio_chip *bgc, bool bit_be, @@ -444,6 +470,9 @@ static int bgpio_setup_io(struct bgpio_chip *bgc, bgc->reg_set = set; bgc->gc.set = bgpio_set_set; bgc->gc.set_multiple = bgpio_set_multiple_set; + } else if (flags & BGPIOF_NO_OUTPUT) { + bgc->gc.set = bgpio_set_none; + bgc->gc.set_multiple = NULL; } else { bgc->gc.set = bgpio_set; bgc->gc.set_multiple = bgpio_set_multiple; @@ -460,7 +489,8 @@ static int bgpio_setup_io(struct bgpio_chip *bgc, static int bgpio_setup_direction(struct bgpio_chip *bgc, void __iomem *dirout, - void __iomem *dirin) + void __iomem *dirin, + unsigned long flags) { if (dirout && dirin) { return -EINVAL; @@ -468,12 +498,17 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc, bgc->reg_dir = dirout; bgc->gc.direction_output = bgpio_dir_out; bgc->gc.direction_input = bgpio_dir_in; + bgc->gc.get_direction = bgpio_get_dir; } else if (dirin) { bgc->reg_dir = dirin; bgc->gc.direction_output = bgpio_dir_out_inv; bgc->gc.direction_input = bgpio_dir_in_inv; + bgc->gc.get_direction = bgpio_get_dir_inv; } else { - bgc->gc.direction_output = bgpio_simple_dir_out; + if (flags & BGPIOF_NO_OUTPUT) + bgc->gc.direction_output = bgpio_dir_out_err; + else + bgc->gc.direction_output = bgpio_simple_dir_out; bgc->gc.direction_input = bgpio_simple_dir_in; } @@ -525,7 +560,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev, if (ret) return ret; - ret = bgpio_setup_direction(bgc, dirout, dirin); + ret = bgpio_setup_direction(bgc, dirout, dirin, flags); if (ret) return ret; diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 0a8f7617e..801423fe8 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -104,17 +104,12 @@ static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset, { struct bgpio_chip *bgc = &priv->bgc; unsigned long mask = bgc->pin2mask(bgc, offset); - unsigned long flags; - - spin_lock_irqsave(&bgc->lock, flags); if (val) priv->imask |= mask; else priv->imask &= ~mask; bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask); - - spin_unlock_irqrestore(&bgc->lock, flags); } static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset) @@ -180,16 +175,26 @@ static void grgpio_irq_mask(struct irq_data *d) { struct grgpio_priv *priv = irq_data_get_irq_chip_data(d); int offset = d->hwirq; + unsigned long flags; + + spin_lock_irqsave(&priv->bgc.lock, flags); grgpio_set_imask(priv, offset, 0); + + spin_unlock_irqrestore(&priv->bgc.lock, flags); } static void grgpio_irq_unmask(struct irq_data *d) { struct grgpio_priv *priv = irq_data_get_irq_chip_data(d); int offset = d->hwirq; + unsigned long flags; + + spin_lock_irqsave(&priv->bgc.lock, flags); grgpio_set_imask(priv, offset, 1); + + spin_unlock_irqrestore(&priv->bgc.lock, flags); } static struct irq_chip grgpio_irq_chip = { @@ -281,12 +286,7 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_set_chip_data(irq, priv); irq_set_chip_and_handler(irq, &grgpio_irq_chip, handle_simple_irq); - irq_clear_status_flags(irq, IRQ_NOREQUEST); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else irq_set_noprobe(irq); -#endif return ret; } @@ -301,9 +301,6 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq) int ngpio = priv->bgc.gc.ngpio; int i; -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); diff --git a/drivers/gpio/gpio-intel-mid.c 
b/drivers/gpio/gpio-intel-mid.c index aa28c65eb..70097472b 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = { }; MODULE_DEVICE_TABLE(pci, intel_gpio_ids); -static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) +static void intel_mid_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 153af464c..127c37b38 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, return 0; } -static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) +static void lp_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct gpio_chip *gc = irq_desc_get_handler_data(desc); diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index 7d3c90e9d..8c5252c6c 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c @@ -685,9 +685,14 @@ static int max732x_probe(struct i2c_client *client, mutex_init(&chip->lock); - max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]); - if (nr_port > 8) - max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]); + ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]); + if (ret) + goto out_failed; + if (nr_port > 8) { + ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]); + if (ret) + goto out_failed; + } ret = gpiochip_add(&chip->gpio_chip); if (ret) diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 2fc7ff852..73db7ecd7 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -507,11 +507,7 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp) irq_set_chip_data(irq, mcp); irq_set_chip(irq, &mcp23s08_irq_chip); irq_set_nested_thread(irq, true); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else irq_set_noprobe(irq); -#endif } return 0; } diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 20aa66f34..48ef36834 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -32,7 +32,7 @@ struct mpc8xxx_gpio_chip { struct of_mm_gpio_chip mm_gc; - spinlock_t lock; + raw_spinlock_t lock; /* * shadowed data register to be able to clear/set output pins in @@ -95,7 +95,7 @@ static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); if (val) mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio); @@ -104,7 +104,7 @@ static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc, @@ -115,7 +115,7 @@ static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc, unsigned long flags; int i; - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); for (i = 0; i < gc->ngpio; i++) { if (*mask == 0) @@ -130,7 +130,7 @@ static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc, out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); - 
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -139,11 +139,11 @@ static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } @@ -156,11 +156,11 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val mpc8xxx_gpio_set(gc, gpio, val); - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } @@ -174,6 +174,15 @@ static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val return mpc8xxx_gpio_dir_out(gc, gpio, val); } +static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + /* GPIO 0..3 are input only on MPC5125 */ + if (gpio <= 3) + return -EINVAL; + + return mpc8xxx_gpio_dir_out(gc, gpio, val); +} + static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); @@ -185,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) return -ENXIO; } -static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) +static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); @@ -206,11 +215,11 @@ static void mpc8xxx_irq_unmask(struct irq_data *d) struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_mask(struct irq_data *d) @@ -219,11 +228,11 @@ static void mpc8xxx_irq_mask(struct irq_data *d) struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_ack(struct irq_data *d) @@ -242,17 +251,17 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + 
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: @@ -282,22 +291,22 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_LEVEL_LOW: - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 2 << shift); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 1 << shift); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: - spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(reg, 3 << shift); - spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: @@ -312,17 +321,13 @@ static struct irq_chip mpc8xxx_irq_chip = { .irq_unmask = mpc8xxx_irq_unmask, .irq_mask = mpc8xxx_irq_mask, .irq_ack = mpc8xxx_irq_ack, + /* this might get overwritten in mpc8xxx_probe() */ .irq_set_type = mpc8xxx_irq_set_type, }; static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, irq_hw_number_t hwirq) { - struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; - - if (mpc8xxx_gc->of_dev_id_data) - mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; - irq_set_chip_data(irq, h->host_data); irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); @@ -334,11 +339,38 @@ static const struct irq_domain_ops mpc8xxx_gpio_irq_ops = { .xlate = irq_domain_xlate_twocell, }; -static struct of_device_id mpc8xxx_gpio_ids[] = { +struct mpc8xxx_gpio_devtype { + int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int); + int (*gpio_get)(struct gpio_chip *, unsigned int); + int (*irq_set_type)(struct irq_data *, unsigned int); +}; + +static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = { + .gpio_dir_out = mpc5121_gpio_dir_out, + .irq_set_type = mpc512x_irq_set_type, +}; + +static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = { + .gpio_dir_out = mpc5125_gpio_dir_out, + .irq_set_type = mpc512x_irq_set_type, +}; + +static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = { + .gpio_get = mpc8572_gpio_get, +}; + +static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = { + .gpio_dir_out = mpc8xxx_gpio_dir_out, + .gpio_get = mpc8xxx_gpio_get, + .irq_set_type = mpc8xxx_irq_set_type, +}; + +static const struct of_device_id mpc8xxx_gpio_ids[] = { { .compatible = "fsl,mpc8349-gpio", }, - { .compatible = "fsl,mpc8572-gpio", }, + { .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, }, { .compatible = "fsl,mpc8610-gpio", }, - { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, }, + { .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, }, + { .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, }, { .compatible = "fsl,pq3-gpio", }, { .compatible = "fsl,qoriq-gpio", }, {} @@ -351,6 +383,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; const struct of_device_id *id; + const struct mpc8xxx_gpio_devtype *devtype = + of_device_get_match_data(&pdev->dev); int ret; mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL); @@ 
-359,7 +393,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mpc8xxx_gc); - spin_lock_init(&mpc8xxx_gc->lock); + raw_spin_lock_init(&mpc8xxx_gc->lock); mm_gc = &mpc8xxx_gc->mm_gc; gc = &mm_gc->gc; @@ -367,10 +401,18 @@ static int mpc8xxx_probe(struct platform_device *pdev) mm_gc->save_regs = mpc8xxx_gpio_save_regs; gc->ngpio = MPC8XXX_GPIO_PINS; gc->direction_input = mpc8xxx_gpio_dir_in; - gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ? - mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out; - gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ? - mpc8572_gpio_get : mpc8xxx_gpio_get; + + if (!devtype) + devtype = &mpc8xxx_gpio_devtype_default; + + /* + * It's assumed that only a single type of gpio controller is available + * on the current machine, so overwriting global data is fine. + */ + mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; + + gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out; + gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get; gc->set = mpc8xxx_gpio_set; gc->set_multiple = mpc8xxx_gpio_set_multiple; gc->to_irq = mpc8xxx_gpio_to_irq; @@ -396,8 +438,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); out_be32(mm_gc->regs + GPIO_IMR, 0); - irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc); - irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade); + irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, + mpc8xxx_gpio_irq_cascade, mpc8xxx_gc); return 0; } @@ -407,8 +449,7 @@ static int mpc8xxx_remove(struct platform_device *pdev) struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev); if (mpc8xxx_gc->irq) { - irq_set_handler_data(mpc8xxx_gc->irqn, NULL); - irq_set_chained_handler(mpc8xxx_gc->irqn, NULL); + irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, NULL, NULL); irq_domain_remove(mpc8xxx_gc->irq); } diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 7bcfb87a5..22523aae8 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = { .irq_bus_sync_unlock = msic_bus_sync_unlock, }; -static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void msic_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct msic_gpio *mg = irq_data_get_irq_handler_data(data); diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index 52ff18229..4b4222145 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -187,14 +187,6 @@ static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) return irq_create_mapping(domain, offset); } -static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq) -{ - struct irq_data *irq_data = irq_get_irq_data(irq); - - return irq_data->hwirq; -} - - /* For dual-edge interrupts in software, since the hardware has no * such support: * @@ -238,7 +230,7 @@ static void msm_gpio_update_dual_edge_pos(unsigned gpio) static void msm_gpio_irq_ack(struct irq_data *d) { - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + int gpio = d->hwirq; writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); if (test_bit(gpio, msm_gpio.dual_edge_irqs)) @@ -247,8 +239,8 @@ static void msm_gpio_irq_ack(struct irq_data *d) static void msm_gpio_irq_mask(struct irq_data *d) { - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; + int gpio = d->hwirq; spin_lock_irqsave(&tlmm_lock, irq_flags); 
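/*
 * In a per-controller irq_domain the hwirq number is the GPIO offset
 * itself (msm_gpio_to_irq() above maps with irq_create_mapping(domain,
 * offset)), which is what lets d->hwirq replace the old
 * msm_irq_to_gpio() reverse lookup throughout this file. A minimal
 * sketch of the idiom, with hypothetical foo_/FOO_ names:
 *
 *	static void foo_irq_mask(struct irq_data *d)
 *	{
 *		unsigned int gpio = d->hwirq;
 *
 *		writel(0, FOO_INTR_EN(gpio));
 *	}
 */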
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); @@ -259,8 +251,8 @@ static void msm_gpio_irq_mask(struct irq_data *d) static void msm_gpio_irq_unmask(struct irq_data *d) { - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; + int gpio = d->hwirq; spin_lock_irqsave(&tlmm_lock, irq_flags); __set_bit(gpio, msm_gpio.enabled_irqs); @@ -271,8 +263,8 @@ static void msm_gpio_irq_unmask(struct irq_data *d) static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) { - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; + int gpio = d->hwirq; uint32_t bits; spin_lock_irqsave(&tlmm_lock, irq_flags); @@ -281,14 +273,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) if (flow_type & IRQ_TYPE_EDGE_BOTH) { bits |= BIT(INTR_DECT_CTL); - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) __set_bit(gpio, msm_gpio.dual_edge_irqs); else __clear_bit(gpio, msm_gpio.dual_edge_irqs); } else { bits &= ~BIT(INTR_DECT_CTL); - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); __clear_bit(gpio, msm_gpio.dual_edge_irqs); } @@ -313,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) * which have been set as summary IRQ lines and which are triggered, * and to call their interrupt handlers. */ -static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) +static void msm_summary_irq_handler(struct irq_desc *desc) { unsigned long i; struct irq_chip *chip = irq_desc_get_chip(desc); @@ -331,7 +323,7 @@ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) { - int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + int gpio = d->hwirq; if (on) { if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO)) @@ -363,7 +355,6 @@ static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_lockdep_class(irq, &msm_gpio_lock_class); irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID); return 0; } diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 1a5420586..df418b814 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -458,9 +458,9 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) return 0; } -static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void mvebu_gpio_irq_handler(struct irq_desc *desc) { - struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq); + struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 cause, type; int i; @@ -787,8 +787,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) if (irq < 0) continue; - irq_set_handler_data(irq, mvchip); - irq_set_chained_handler(irq, mvebu_gpio_irq_handler); + irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, + mvchip); } mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index ec1eb1b72..b8dd84744 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -272,11 +272,11 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) } /* MX1 and MX3 has one interrupt *per* gpio port */ -static void mx3_gpio_irq_handler(u32 irq, 
struct irq_desc *desc) +static void mx3_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; - struct mxc_gpio_port *port = irq_get_handler_data(irq); - struct irq_chip *chip = irq_get_chip(irq); + struct mxc_gpio_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); @@ -288,11 +288,11 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) } /* MX2 has one interrupt *for all* gpio ports */ -static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mx2_gpio_irq_handler(struct irq_desc *desc) { u32 irq_msk, irq_stat; struct mxc_gpio_port *port; - struct irq_chip *chip = irq_get_chip(irq); + struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); @@ -339,13 +339,15 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable) return 0; } -static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) +static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base, port->base, handle_level_irq); + if (!gc) + return -ENOMEM; gc->private = port; ct = gc->chip_types; @@ -354,11 +356,14 @@ static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base) ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_set_irq_type; ct->chip.irq_set_wake = gpio_set_wake_irq; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND; ct->regs.ack = GPIO_ISR; ct->regs.mask = GPIO_IMR; irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + + return 0; } static void mxc_gpio_get_hw(struct platform_device *pdev) @@ -476,12 +481,16 @@ static int mxc_gpio_probe(struct platform_device *pdev) } /* gpio-mxc can be a generic irq chip */ - mxc_gpio_init_gc(port, irq_base); + err = mxc_gpio_init_gc(port, irq_base); + if (err < 0) + goto out_irqdomain_remove; list_add_tail(&port->node, &mxc_gpio_ports); return 0; +out_irqdomain_remove: + irq_domain_remove(port->domain); out_irqdesc_free: irq_free_descs(irq_base, 32); out_gpiochip_remove: diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index 551d15d7c..a4288f428 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -154,10 +154,10 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio) } /* MXS has one interrupt *per* gpio port */ -static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void mxs_gpio_irq_handler(struct irq_desc *desc) { u32 irq_stat; - struct mxs_gpio_port *port = irq_get_handler_data(irq); + struct mxs_gpio_port *port = irq_desc_get_handler_data(desc); desc->irq_data.chip->irq_ack(&desc->irq_data); @@ -196,13 +196,16 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) return 0; } -static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) +static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base, port->base, handle_level_irq); + if (!gc) + return -ENOMEM; + gc->private = port; ct = gc->chip_types; @@ -216,6 +219,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + + return 0; } static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) @@ -317,7 +322,9 @@ static int mxs_gpio_probe(struct 
platform_device *pdev) } /* gpio-mxs can be a generic irq chip */ - mxs_gpio_init_gc(port, irq_base); + err = mxs_gpio_init_gc(port, irq_base); + if (err < 0) + goto out_irqdomain_remove; /* setup one handler for each entry */ irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, @@ -343,6 +350,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) out_bgpio_remove: bgpio_remove(&port->bgc); +out_irqdomain_remove: + irq_domain_remove(port->domain); out_irqdesc_free: irq_free_descs(irq_base, 32); return err; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 61a731ff9..5236db161 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -29,6 +29,7 @@ #include #define OFF_MODE 1 +#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF static LIST_HEAD(omap_gpio_list); @@ -57,7 +58,7 @@ struct gpio_bank { u32 saved_datain; u32 level_mask; u32 toggle_mask; - spinlock_t lock; + raw_spinlock_t lock; struct gpio_chip chip; struct clk *dbck; u32 mod_usage; @@ -175,7 +176,7 @@ static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set static inline void omap_gpio_dbck_enable(struct gpio_bank *bank) { if (bank->dbck_enable_mask && !bank->dbck_enabled) { - clk_prepare_enable(bank->dbck); + clk_enable(bank->dbck); bank->dbck_enabled = true; writel_relaxed(bank->dbck_enable_mask, @@ -193,7 +194,7 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank) */ writel_relaxed(0, bank->base + bank->regs->debounce_en); - clk_disable_unprepare(bank->dbck); + clk_disable(bank->dbck); bank->dbck_enabled = false; } } @@ -204,8 +205,9 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank) * @offset: the gpio number on this @bank * @debounce: debounce time to use * - * OMAP's debounce time is in 31us steps so we need - * to convert and round up to the closest unit. + * OMAP's debounce time is in 31us steps + * = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31 + * so we need to convert and round up to the closest unit. */ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, unsigned debounce) @@ -213,34 +215,33 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, void __iomem *reg; u32 val; u32 l; + bool enable = !!debounce; if (!bank->dbck_flag) return; - if (debounce < 32) - debounce = 0x01; - else if (debounce > 7936) - debounce = 0xff; - else - debounce = (debounce / 0x1f) - 1; + if (enable) { + debounce = DIV_ROUND_UP(debounce, 31) - 1; + debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK; + } l = BIT(offset); - clk_prepare_enable(bank->dbck); + clk_enable(bank->dbck); reg = bank->base + bank->regs->debounce; writel_relaxed(debounce, reg); reg = bank->base + bank->regs->debounce_en; val = readl_relaxed(reg); - if (debounce) + if (enable) val |= l; else val &= ~l; bank->dbck_enable_mask = val; writel_relaxed(val, reg); - clk_disable_unprepare(bank->dbck); + clk_disable(bank->dbck); /* * Enable debounce clock per module. 
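* (The register value written above is DIV_ROUND_UP(debounce, 31) - 1:
* a 100 us request becomes 3, which the block stretches back out to
* (3 + 1) * 31 = 124 us, i.e. requests round up to the next 31 us step.)
*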
* This call is mandatory because in omap_gpio_request() when @@ -285,7 +286,7 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) bank->context.debounce = 0; writel_relaxed(bank->context.debounce, bank->base + bank->regs->debounce); - clk_disable_unprepare(bank->dbck); + clk_disable(bank->dbck); bank->dbck_enabled = false; } } @@ -498,24 +499,24 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) if (!BANK_USED(bank)) pm_runtime_get_sync(bank->dev); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); retval = omap_set_gpio_triggering(bank, offset, type); if (retval) { - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); goto error; } omap_gpio_init_irq(bank, offset); if (!omap_gpio_is_input(bank, offset)) { - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); retval = -EINVAL; goto error; } - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); return 0; @@ -636,14 +637,14 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset, return -EINVAL; } - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); if (enable) bank->context.wake_en |= gpio_bit; else bank->context.wake_en &= ~gpio_bit; writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -669,10 +670,10 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) if (!BANK_USED(bank)) pm_runtime_get_sync(bank->dev); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); omap_enable_gpio_module(bank, offset); bank->mod_usage |= BIT(offset); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -682,14 +683,14 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); unsigned long flags; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); bank->mod_usage &= ~(BIT(offset)); if (!LINE_USED(bank->irq_usage, offset)) { omap_set_gpio_direction(bank, offset, 1); omap_clear_gpio_debounce(bank, offset); } omap_disable_gpio_module(bank, offset); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); /* * If this is the last gpio to be freed in the bank, @@ -708,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) * line's interrupt handler has been run, we may miss some nested * interrupts. 
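*
* The bank->lock raw spinlock is additionally taken around the
* mask/read/clear register updates below, since the irqchip callbacks
* touch the same registers; it is dropped again before
* generic_handle_irq() so that nested handlers never run under it.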
*/ -static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_gpio_irq_handler(struct irq_desc *desc) { void __iomem *isr_reg = NULL; u32 isr; @@ -716,7 +717,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) struct gpio_bank *bank; int unmasked = 0; struct irq_chip *irqchip = irq_desc_get_chip(desc); - struct gpio_chip *chip = irq_get_handler_data(irq); + struct gpio_chip *chip = irq_desc_get_handler_data(desc); + unsigned long lock_flags; chained_irq_enter(irqchip, desc); @@ -731,6 +733,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) u32 isr_saved, level_mask = 0; u32 enabled; + raw_spin_lock_irqsave(&bank->lock, lock_flags); + enabled = omap_get_gpio_irqbank_mask(bank); isr_saved = isr = readl_relaxed(isr_reg) & enabled; @@ -744,6 +748,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); + raw_spin_unlock_irqrestore(&bank->lock, lock_flags); + /* if there is only edge sensitive GPIO pin interrupts configured, we could unmask GPIO bank interrupt immediately */ if (!level_mask && !unmasked) { @@ -758,6 +764,7 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) bit = __ffs(isr); isr &= ~(BIT(bit)); + raw_spin_lock_irqsave(&bank->lock, lock_flags); /* * Some chips can't respond to both rising and falling * at the same time. If this irq was requested with @@ -768,6 +775,8 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) if (bank->toggle_mask & (BIT(bit))) omap_toggle_gpio_edge_triggering(bank, bit); + raw_spin_unlock_irqrestore(&bank->lock, lock_flags); + generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit)); } @@ -791,7 +800,7 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d) if (!BANK_USED(bank)) pm_runtime_get_sync(bank->dev); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); if (!LINE_USED(bank->mod_usage, offset)) omap_set_gpio_direction(bank, offset, 1); @@ -800,12 +809,12 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d) omap_enable_gpio_module(bank, offset); bank->irq_usage |= BIT(offset); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); omap_gpio_unmask_irq(d); return 0; err: - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); if (!BANK_USED(bank)) pm_runtime_put(bank->dev); return -EINVAL; @@ -817,7 +826,7 @@ static void omap_gpio_irq_shutdown(struct irq_data *d) unsigned long flags; unsigned offset = d->hwirq; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); bank->irq_usage &= ~(BIT(offset)); omap_set_gpio_irqenable(bank, offset, 0); omap_clear_gpio_irqstatus(bank, offset); @@ -825,7 +834,7 @@ static void omap_gpio_irq_shutdown(struct irq_data *d) if (!LINE_USED(bank->mod_usage, offset)) omap_clear_gpio_debounce(bank, offset); omap_disable_gpio_module(bank, offset); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); /* * If this is the last IRQ to be freed in the bank, @@ -849,10 +858,10 @@ static void omap_gpio_mask_irq(struct irq_data *d) unsigned offset = d->hwirq; unsigned long flags; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); omap_set_gpio_irqenable(bank, offset, 0); omap_set_gpio_triggering(bank, 
offset, IRQ_TYPE_NONE); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); } static void omap_gpio_unmask_irq(struct irq_data *d) @@ -862,7 +871,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d) u32 trigger = irqd_get_trigger_type(d); unsigned long flags; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); if (trigger) omap_set_gpio_triggering(bank, offset, trigger); @@ -874,7 +883,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d) } omap_set_gpio_irqenable(bank, offset, 1); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); } /*---------------------------------------------------------------------*/ @@ -887,9 +896,9 @@ static int omap_mpuio_suspend_noirq(struct device *dev) OMAP_MPUIO_GPIO_MASKIT / bank->stride; unsigned long flags; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -902,9 +911,9 @@ static int omap_mpuio_resume_noirq(struct device *dev) OMAP_MPUIO_GPIO_MASKIT / bank->stride; unsigned long flags; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); writel_relaxed(bank->context.wake_en, mask_reg); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -950,9 +959,9 @@ static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset) bank = container_of(chip, struct gpio_bank, chip); reg = bank->base + bank->regs->direction; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); dir = !!(readl_relaxed(reg) & BIT(offset)); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return dir; } @@ -962,9 +971,9 @@ static int omap_gpio_input(struct gpio_chip *chip, unsigned offset) unsigned long flags; bank = container_of(chip, struct gpio_bank, chip); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); omap_set_gpio_direction(bank, offset, 1); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -986,10 +995,10 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value) unsigned long flags; bank = container_of(chip, struct gpio_bank, chip); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); bank->set_dataout(bank, offset, value); omap_set_gpio_direction(bank, offset, 0); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -1001,9 +1010,9 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset, bank = container_of(chip, struct gpio_bank, chip); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); omap2_set_gpio_debounce(bank, offset, debounce); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -1014,9 +1023,9 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) unsigned long flags; bank = container_of(chip, struct gpio_bank, chip); - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); bank->set_dataout(bank, offset, value); - spin_unlock_irqrestore(&bank->lock, flags); + 
raw_spin_unlock_irqrestore(&bank->lock, flags); } /*---------------------------------------------------------------------*/ @@ -1061,10 +1070,6 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) /* Initialize interface clk ungated, module enabled */ if (bank->regs->ctrl) writel_relaxed(0, base + bank->regs->ctrl); - - bank->dbck = clk_get(bank->dev, "dbclk"); - if (IS_ERR(bank->dbck)) - dev_err(bank->dev, "Could not get gpio dbck\n"); } static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) @@ -1093,7 +1098,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) } else { bank->chip.label = "gpio"; bank->chip.base = gpio; - gpio += bank->width; } bank->chip.ngpio = bank->width; @@ -1103,6 +1107,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) return ret; } + if (!bank->is_mpuio) + gpio += bank->width; + #ifdef CONFIG_ARCH_OMAP1 /* * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop @@ -1178,13 +1185,16 @@ static int omap_gpio_probe(struct platform_device *pdev) irqc->irq_set_wake = omap_gpio_wake_enable, irqc->name = dev_name(&pdev->dev); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (unlikely(!res)) { - dev_err(dev, "Invalid IRQ resource\n"); - return -ENODEV; + bank->irq = platform_get_irq(pdev, 0); + if (bank->irq <= 0) { + if (!bank->irq) + bank->irq = -ENXIO; + if (bank->irq != -EPROBE_DEFER) + dev_err(dev, + "can't get irq resource ret=%d\n", bank->irq); + return bank->irq; } - bank->irq = res->start; bank->dev = dev; bank->chip.dev = dev; bank->chip.owner = THIS_MODULE; @@ -1213,16 +1223,26 @@ static int omap_gpio_probe(struct platform_device *pdev) else bank->set_dataout = omap_set_gpio_dataout_mask; - spin_lock_init(&bank->lock); + raw_spin_lock_init(&bank->lock); /* Static mapping, never released */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bank->base = devm_ioremap_resource(dev, res); if (IS_ERR(bank->base)) { - irq_domain_remove(bank->chip.irqdomain); return PTR_ERR(bank->base); } + if (bank->dbck_flag) { + bank->dbck = devm_clk_get(bank->dev, "dbclk"); + if (IS_ERR(bank->dbck)) { + dev_err(bank->dev, + "Could not get gpio dbck. Disable debounce\n"); + bank->dbck_flag = false; + } else { + clk_prepare(bank->dbck); + } + } + platform_set_drvdata(pdev, bank); pm_runtime_enable(bank->dev); @@ -1235,8 +1255,11 @@ static int omap_gpio_probe(struct platform_device *pdev) omap_gpio_mod_init(bank); ret = omap_gpio_chip_init(bank, irqc); - if (ret) + if (ret) { + pm_runtime_put_sync(bank->dev); + pm_runtime_disable(bank->dev); return ret; + } omap_gpio_show_rev(bank); @@ -1254,6 +1277,8 @@ static int omap_gpio_remove(struct platform_device *pdev) list_del(&bank->node); gpiochip_remove(&bank->chip); pm_runtime_disable(bank->dev); + if (bank->dbck_flag) + clk_unprepare(bank->dbck); return 0; } @@ -1271,7 +1296,7 @@ static int omap_gpio_runtime_suspend(struct device *dev) unsigned long flags; u32 wake_low, wake_hi; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); /* * Only edges can generate a wakeup event to the PRCM. 
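The bank-lock conversions in this file follow one pattern: spinlock_t
becomes raw_spinlock_t because the lock is taken from the chained
handler and from irqchip callbacks, which run in hard-irq context where
a sleeping lock (as spinlock_t is under PREEMPT_RT) must not be taken.
A minimal sketch of the pattern, with a hypothetical foo_bank and
register offset:

	struct foo_bank {
		raw_spinlock_t lock;	/* protects bank registers */
		void __iomem *base;
	};

	static void foo_irq_mask(struct irq_data *d)
	{
		struct foo_bank *bank = irq_data_get_irq_chip_data(d);
		unsigned long flags;

		raw_spin_lock_irqsave(&bank->lock, flags);
		/* FOO_IRQENABLE_CLR is a hypothetical register offset */
		writel_relaxed(BIT(d->hwirq), bank->base + FOO_IRQENABLE_CLR);
		raw_spin_unlock_irqrestore(&bank->lock, flags);
	}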
@@ -1324,7 +1349,7 @@ update_gpio_context_count: bank->get_context_loss_count(bank->dev); omap_gpio_dbck_disable(bank); - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -1339,7 +1364,7 @@ static int omap_gpio_runtime_resume(struct device *dev) unsigned long flags; int c; - spin_lock_irqsave(&bank->lock, flags); + raw_spin_lock_irqsave(&bank->lock, flags); /* * On the first resume during the probe, the context has not @@ -1375,14 +1400,14 @@ static int omap_gpio_runtime_resume(struct device *dev) if (c != bank->context_loss_count) { omap_gpio_restore_context(bank); } else { - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } } } if (!bank->workaround_enabled) { - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } @@ -1437,7 +1462,7 @@ static int omap_gpio_runtime_resume(struct device *dev) } bank->workaround_enabled = false; - spin_unlock_irqrestore(&bank->lock, flags); + raw_spin_unlock_irqrestore(&bank->lock, flags); return 0; } diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 404f3c61e..1d4d9bc8b 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -88,7 +88,6 @@ struct pcf857x { struct gpio_chip chip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ - spinlock_t slock; /* protect irq demux */ unsigned out; /* software latch */ unsigned status; /* current status */ unsigned int irq_parent; @@ -185,23 +184,21 @@ static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value) static irqreturn_t pcf857x_irq(int irq, void *data) { struct pcf857x *gpio = data; - unsigned long change, i, status, flags; + unsigned long change, i, status; status = gpio->read(gpio->client); - spin_lock_irqsave(&gpio->slock, flags); - /* * call the interrupt handler iff gpio is used as * interrupt source, just to avoid bad irqs */ - + mutex_lock(&gpio->lock); change = (gpio->status ^ status) & gpio->irq_enabled; - for_each_set_bit(i, &change, gpio->chip.ngpio) - handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i)); gpio->status = status; + mutex_unlock(&gpio->lock); - spin_unlock_irqrestore(&gpio->slock, flags); + for_each_set_bit(i, &change, gpio->chip.ngpio) + handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i)); return IRQ_HANDLED; } @@ -293,7 +290,6 @@ static int pcf857x_probe(struct i2c_client *client, return -ENOMEM; mutex_init(&gpio->lock); - spin_lock_init(&gpio->slock); gpio->chip.base = pdata ? 
pdata->gpio_base : -1; gpio->chip.can_sleep = true; diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index 2d9a950ca..34ed176df 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -281,9 +281,9 @@ static int pch_irq_type(struct irq_data *d, unsigned int type) /* And the handler */ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); unlock: spin_unlock_irqrestore(&chip->spinlock, flags); diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 047561304..229ef653e 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger) return 0; } -static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) +static void pl061_irq_handler(struct irq_desc *desc) { unsigned long pending; int offset; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index cdbbcf0fa..df2ce550f 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) +static void pxa_gpio_demux_handler(struct irq_desc *desc) { struct pxa_gpio_chip *c; int loop, gpio, gpio_base, n; @@ -524,7 +524,7 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, { irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_noprobe(irq); return 0; } @@ -643,20 +643,20 @@ static int pxa_gpio_probe(struct platform_device *pdev) irq = gpio_to_irq(0); irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } if (irq1 > 0) { irq = gpio_to_irq(1); irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } for (irq = gpio_to_irq(gpio_offset); irq <= gpio_to_irq(pxa_last_gpio); irq++) { irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 1e14a6c74..2a8122444 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -251,17 +251,32 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) { - return pinctrl_request_gpio(chip->base + offset); + struct gpio_rcar_priv *p = gpio_to_priv(chip); + int error; + + error = pm_runtime_get_sync(&p->pdev->dev); + if (error < 0) + return error; + + error = pinctrl_request_gpio(chip->base + offset); + if (error) + pm_runtime_put(&p->pdev->dev); + + return error; } static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset) { + struct gpio_rcar_priv *p = gpio_to_priv(chip); + pinctrl_free_gpio(chip->base + offset); /* Set the GPIO as an input to ensure that the next GPIO request won't * drive the GPIO pin as an output. 
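The pl061, pxa and sa1100 hunks around here all apply the same irq-core API migration: chained flow handlers lose their unsigned int irq argument and fetch per-bank state with irq_desc_get_handler_data() instead of irq_get_handler_data(). A hedged sketch of the resulting handler shape; demo_bank, DEMO_STATUS and the 32-line width are illustrative, not from the patch:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

struct demo_bank {
	void __iomem *base;		/* bank registers */
	struct irq_domain *domain;	/* per-bank irqdomain */
};

#define DEMO_STATUS	0x00	/* hypothetical pending-interrupt register */

static void demo_gpio_irq_handler(struct irq_desc *desc)
{
	struct demo_bank *bank = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long pending;
	int offset;

	chained_irq_enter(irqchip, desc);
	pending = readl_relaxed(bank->base + DEMO_STATUS);
	for_each_set_bit(offset, &pending, 32)
		generic_handle_irq(irq_find_mapping(bank->domain, offset));
	chained_irq_exit(irqchip, desc);
}

It is installed with irq_set_chained_handler_and_data(parent_irq, demo_gpio_irq_handler, bank), the combined setter this patch switches several drivers to.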
*/ gpio_rcar_config_general_input_output_mode(chip, offset, false); + + pm_runtime_put(&p->pdev->dev); } static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset) @@ -326,6 +341,10 @@ static const struct of_device_id gpio_rcar_of_table[] = { }, { .compatible = "renesas,gpio-r8a7794", .data = &gpio_rcar_info_gen2, + }, { + .compatible = "renesas,gpio-r8a7795", + /* Gen3 GPIO is identical to Gen2. */ + .data = &gpio_rcar_info_gen2, }, { .compatible = "renesas,gpio-rcar", .data = &gpio_rcar_info_gen1, @@ -405,7 +424,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) } pm_runtime_enable(dev); - pm_runtime_get_sync(dev); io = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -487,7 +505,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) err1: gpiochip_remove(gpio_chip); err0: - pm_runtime_put(dev); pm_runtime_disable(dev); return ret; } @@ -498,7 +515,6 @@ static int gpio_rcar_remove(struct platform_device *pdev) gpiochip_remove(&p->gpio_chip); - pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 3fa22dade..990fa9023 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d, { irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_set_noprobe(irq); return 0; } @@ -172,10 +172,9 @@ static struct irq_domain *sa1100_gpio_irqdomain; * irq_controller_lock held, and IRQs disabled. Decode the IRQ * and call the handler. */ -static void -sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc) +static void sa1100_gpio_handler(struct irq_desc *desc) { - unsigned int mask; + unsigned int irq, mask; mask = GEDR; do { diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c index 18579ac65..55e47828d 100644 --- a/drivers/gpio/gpio-sta2x11.c +++ b/drivers/gpio/gpio-sta2x11.c @@ -346,7 +346,7 @@ static void gsta_alloc_irq_chip(struct gsta_gpio *chip) i = chip->irq_base + j; irq_set_chip_and_handler(i, &ct->chip, ct->handler); irq_set_chip_data(i, gc); - irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0); + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } gc->irq_cnt = i - gc->irq_base; } diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index 458d9d795..9c6b96707 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c @@ -706,4 +706,3 @@ module_exit(sx150x_exit); MODULE_AUTHOR("Gregory Bean "); MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("i2c:sx150x"); diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 31b244cff..d1d585ddb 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -102,7 +102,7 @@ static struct gpio_chip template_chip = { static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); + struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -130,7 +130,7 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) static void tc3589x_gpio_irq_lock(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 
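The tc3589x hunks below replace five open-coded container_of() expressions with a to_tc3589x_gpio() accessor. Its definition is outside this excerpt; presumably it is the usual static-inline wrapper, along these lines:

/* Assumed shape of the helper introduced elsewhere in this patch */
static inline struct tc3589x_gpio *to_tc3589x_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct tc3589x_gpio, chip);
}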
- struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); + struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc); mutex_lock(&tc3589x_gpio->irq_lock); } @@ -138,7 +138,7 @@ static void tc3589x_gpio_irq_lock(struct irq_data *d) static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); + struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; static const u8 regmap[] = { [REG_IBE] = TC3589x_GPIOIBE0, @@ -167,7 +167,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) static void tc3589x_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); + struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -178,7 +178,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d) static void tc3589x_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); + struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 9b25c90f7..027e5f47d 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -252,9 +252,9 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type) tegra_gpio_enable(gpio); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); return 0; } @@ -266,18 +266,16 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio); } -static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tegra_gpio_irq_handler(struct irq_desc *desc) { - struct tegra_gpio_bank *bank; int port; int pin; int unmasked = 0; struct irq_chip *chip = irq_desc_get_chip(desc); + struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc); chained_irq_enter(chip, desc); - bank = irq_get_handler_data(irq); - for (port = 0; port < 4; port++) { int gpio = tegra_gpio_compose(bank->bank, port, 0); unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) & @@ -509,7 +507,6 @@ static int tegra_gpio_probe(struct platform_device *pdev) irq_set_chip_data(irq, bank); irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip, handle_simple_irq); - set_irq_flags(irq, IRQF_VALID); } for (i = 0; i < tegra_gpio_bank_count; i++) { diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index e8f97e03c..30653e631 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -192,13 +192,14 @@ out: return ret; } -static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) +static void timbgpio_irq(struct irq_desc *desc) { - struct timbgpio *tgpio = irq_get_handler_data(irq); + struct timbgpio *tgpio = irq_desc_get_handler_data(desc); + struct irq_data *data = irq_desc_get_irq_data(desc); unsigned long ipr; int offset; - 
desc->irq_data.chip->irq_ack(irq_get_irq_data(irq)); + data->chip->irq_ack(data); ipr = ioread32(tgpio->membase + TGPIO_IPR); iowrite32(ipr, tgpio->membase + TGPIO_ICR); @@ -294,13 +295,10 @@ static int timbgpio_probe(struct platform_device *pdev) irq_set_chip_and_handler(tgpio->irq_base + i, &timbgpio_irqchip, handle_simple_irq); irq_set_chip_data(tgpio->irq_base + i, tgpio); -#ifdef CONFIG_ARM - set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE); -#endif + irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE); } - irq_set_handler_data(irq, tgpio); - irq_set_chained_handler(irq, timbgpio_irq); + irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio); return 0; } diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c index 445660adc..87bb1b1ee 100644 --- a/drivers/gpio/gpio-tz1090.c +++ b/drivers/gpio/gpio-tz1090.c @@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on) #define gpio_set_irq_wake NULL #endif -static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void tz1090_gpio_irq_handler(struct irq_desc *desc) { irq_hw_number_t hw; unsigned int irq_stat, irq_no; @@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) == IRQ_TYPE_EDGE_BOTH) tz1090_gpio_irq_next_edge(bank, hw); - generic_handle_irq_desc(irq_no, child_desc); + generic_handle_irq_desc(child_desc); } } @@ -510,8 +510,8 @@ static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info) gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND; /* Setup chained handler for this GPIO bank */ - irq_set_handler_data(bank->irq, bank); - irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler); + irq_set_chained_handler_and_data(bank->irq, tz1090_gpio_irq_handler, + bank); return 0; } diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 7bd9f209f..069f9e4b7 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -60,6 +60,8 @@ struct vf610_gpio_port { #define PORT_INT_EITHER_EDGE 0xb #define PORT_INT_LOGIC_ONE 0xc +static struct irq_chip vf610_gpio_irq_chip; + static const struct of_device_id vf610_gpio_dt_ids[] = { { .compatible = "fsl,vf610-gpio" }, { /* sentinel */ } @@ -118,9 +120,9 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, return pinctrl_gpio_direction_output(chip->base + gpio); } -static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc) +static void vf610_gpio_irq_handler(struct irq_desc *desc) { - struct vf610_gpio_port *port = irq_get_handler_data(irq); + struct vf610_gpio_port *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); int pin; unsigned long irq_isfr; @@ -173,6 +175,11 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type) port->irqc[d->hwirq] = irqc; + if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + else + irq_set_handler_locked(d, handle_edge_irq); + return 0; } @@ -263,7 +270,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) vf610_gpio_writel(~0, port->base + PORT_ISFR); ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0, - handle_simple_irq, IRQ_TYPE_NONE); + handle_edge_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add irqchip\n"); gpiochip_remove(gc); diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c index 9bdab7203..e02499a15 100644 --- a/drivers/gpio/gpio-xlp.c +++ b/drivers/gpio/gpio-xlp.c @@ -387,7 +387,7 @@ static int xlp_gpio_probe(struct 
platform_device *pdev) irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); if (irq_base < 0) { dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); - return err; + return -ENODEV; } err = gpiochip_add(gc); diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c new file mode 100644 index 000000000..4b8a26910 --- /dev/null +++ b/drivers/gpio/gpio-zx.c @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ZX_GPIO_DIR 0x00 +#define ZX_GPIO_IVE 0x04 +#define ZX_GPIO_IV 0x08 +#define ZX_GPIO_IEP 0x0C +#define ZX_GPIO_IEN 0x10 +#define ZX_GPIO_DI 0x14 +#define ZX_GPIO_DO1 0x18 +#define ZX_GPIO_DO0 0x1C +#define ZX_GPIO_DO 0x20 + +#define ZX_GPIO_IM 0x28 +#define ZX_GPIO_IE 0x2C + +#define ZX_GPIO_MIS 0x30 +#define ZX_GPIO_IC 0x34 + +#define ZX_GPIO_NR 16 + +struct zx_gpio { + spinlock_t lock; + + void __iomem *base; + struct gpio_chip gc; + bool uses_pinctrl; +}; + +static inline struct zx_gpio *to_zx(struct gpio_chip *gc) +{ + return container_of(gc, struct zx_gpio, gc); +} + +static int zx_gpio_request(struct gpio_chip *gc, unsigned offset) +{ + struct zx_gpio *chip = to_zx(gc); + int gpio = gc->base + offset; + + if (chip->uses_pinctrl) + return pinctrl_request_gpio(gpio); + return 0; +} + +static void zx_gpio_free(struct gpio_chip *gc, unsigned offset) +{ + struct zx_gpio *chip = to_zx(gc); + int gpio = gc->base + offset; + + if (chip->uses_pinctrl) + pinctrl_free_gpio(gpio); +} + +static int zx_direction_input(struct gpio_chip *gc, unsigned offset) +{ + struct zx_gpio *chip = to_zx(gc); + unsigned long flags; + u16 gpiodir; + + if (offset >= gc->ngpio) + return -EINVAL; + + spin_lock_irqsave(&chip->lock, flags); + gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR); + gpiodir &= ~BIT(offset); + writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR); + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int zx_direction_output(struct gpio_chip *gc, unsigned offset, + int value) +{ + struct zx_gpio *chip = to_zx(gc); + unsigned long flags; + u16 gpiodir; + + if (offset >= gc->ngpio) + return -EINVAL; + + spin_lock_irqsave(&chip->lock, flags); + gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR); + gpiodir |= BIT(offset); + writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR); + + if (value) + writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1); + else + writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0); + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int zx_get_value(struct gpio_chip *gc, unsigned offset) +{ + struct zx_gpio *chip = to_zx(gc); + + return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset)); +} + +static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value) +{ + struct zx_gpio *chip = to_zx(gc); + + if (value) + writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1); + else + writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0); +} + +static int zx_irq_type(struct irq_data *d, unsigned trigger) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zx_gpio *chip = to_zx(gc); + int offset = irqd_to_hwirq(d); + unsigned long flags; + u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev; + u16 bit = BIT(offset); + + if (offset < 0 || offset >= ZX_GPIO_NR) + return 
-EINVAL; + + spin_lock_irqsave(&chip->lock, flags); + + gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV); + gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE); + gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP); + gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN); + + if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { + gpiois |= bit; + if (trigger & IRQ_TYPE_LEVEL_HIGH) + gpioiev |= bit; + else + gpioiev &= ~bit; + } else + gpiois &= ~bit; + + if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { + gpioi_epos |= bit; + gpioi_eneg |= bit; + } else { + if (trigger & IRQ_TYPE_EDGE_RISING) { + gpioi_epos |= bit; + gpioi_eneg &= ~bit; + } else if (trigger & IRQ_TYPE_EDGE_FALLING) { + gpioi_eneg |= bit; + gpioi_epos &= ~bit; + } + } + + writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE); + writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP); + writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN); + writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV); + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static void zx_irq_handler(struct irq_desc *desc) +{ + unsigned long pending; + int offset; + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct zx_gpio *chip = to_zx(gc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + + chained_irq_enter(irqchip, desc); + + pending = readw_relaxed(chip->base + ZX_GPIO_MIS); + writew_relaxed(pending, chip->base + ZX_GPIO_IC); + if (pending) { + for_each_set_bit(offset, &pending, ZX_GPIO_NR) + generic_handle_irq(irq_find_mapping(gc->irqdomain, + offset)); + } + + chained_irq_exit(irqchip, desc); +} + +static void zx_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zx_gpio *chip = to_zx(gc); + u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR); + u16 gpioie; + + spin_lock(&chip->lock); + gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask; + writew_relaxed(gpioie, chip->base + ZX_GPIO_IM); + gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask; + writew_relaxed(gpioie, chip->base + ZX_GPIO_IE); + spin_unlock(&chip->lock); +} + +static void zx_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zx_gpio *chip = to_zx(gc); + u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR); + u16 gpioie; + + spin_lock(&chip->lock); + gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask; + writew_relaxed(gpioie, chip->base + ZX_GPIO_IM); + gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask; + writew_relaxed(gpioie, chip->base + ZX_GPIO_IE); + spin_unlock(&chip->lock); +} + +static struct irq_chip zx_irqchip = { + .name = "zx-gpio", + .irq_mask = zx_irq_mask, + .irq_unmask = zx_irq_unmask, + .irq_set_type = zx_irq_type, +}; + +static int zx_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct zx_gpio *chip; + struct resource *res; + int irq, id, ret; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base = devm_ioremap_resource(dev, res); + if (IS_ERR(chip->base)) + return PTR_ERR(chip->base); + + spin_lock_init(&chip->lock); + if (of_property_read_bool(dev->of_node, "gpio-ranges")) + chip->uses_pinctrl = true; + + id = of_alias_get_id(dev->of_node, "gpio"); + chip->gc.request = zx_gpio_request; + chip->gc.free = zx_gpio_free; + chip->gc.direction_input = zx_direction_input; + chip->gc.direction_output = zx_direction_output; + chip->gc.get = zx_get_value; + chip->gc.set = zx_set_value; + chip->gc.base 
= ZX_GPIO_NR * id; + chip->gc.ngpio = ZX_GPIO_NR; + chip->gc.label = dev_name(dev); + chip->gc.dev = dev; + chip->gc.owner = THIS_MODULE; + + ret = gpiochip_add(&chip->gc); + if (ret) + return ret; + + /* + * irq_chip support + */ + writew_relaxed(0xffff, chip->base + ZX_GPIO_IM); + writew_relaxed(0, chip->base + ZX_GPIO_IE); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "invalid IRQ\n"); + gpiochip_remove(&chip->gc); + return -ENODEV; + } + + ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip, + 0, handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(dev, "could not add irqchip\n"); + gpiochip_remove(&chip->gc); + return ret; + } + gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip, + irq, zx_irq_handler); + + platform_set_drvdata(pdev, chip); + dev_info(dev, "ZX GPIO chip registered\n"); + + return 0; +} + +static const struct of_device_id zx_gpio_match[] = { + { + .compatible = "zte,zx296702-gpio", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, zx_gpio_match); + +static struct platform_driver zx_gpio_driver = { + .probe = zx_gpio_probe, + .driver = { + .name = "zx_gpio", + .of_match_table = of_match_ptr(zx_gpio_match), + }, +}; + +module_platform_driver(zx_gpio_driver) + +MODULE_AUTHOR("Jun Nie "); +MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index a78882389..1d1a5865e 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -441,10 +441,10 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type) gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); if (type & IRQ_TYPE_LEVEL_MASK) { - __irq_set_chip_handler_name_locked(irq_data->irq, + irq_set_chip_handler_name_locked(irq_data, &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL); } else { - __irq_set_chip_handler_name_locked(irq_data->irq, + irq_set_chip_handler_name_locked(irq_data, &zynq_gpio_edge_irqchip, handle_level_irq, NULL); } @@ -514,11 +514,11 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, * application for that pin. * Note: A bug is reported if no handler is set for the gpio pin. */ -static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) +static void zynq_gpio_irqhandler(struct irq_desc *desc) { u32 int_sts, int_enb; unsigned int bank_num; - struct zynq_gpio *gpio = irq_get_handler_data(irq); + struct zynq_gpio *gpio = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); chained_irq_enter(irqchip, desc); @@ -782,6 +782,12 @@ static int __init zynq_gpio_init(void) } postcore_initcall(zynq_gpio_init); +static void __exit zynq_gpio_exit(void) +{ + platform_driver_unregister(&zynq_gpio_driver); +} +module_exit(zynq_gpio_exit); + MODULE_AUTHOR("Xilinx Inc."); MODULE_DESCRIPTION("Zynq GPIO driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 533fe5dbe..143a9bdba 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -68,7 +68,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) * GPIO controller driver. * * Typically the returned offset is same as @pin, but if the GPIO - * controller uses pin controller and the mapping is not contigous the + * controller uses pin controller and the mapping is not contiguous the * offset might be different. 
*/ static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 9a0ec48a4..fa6e3c882 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -136,7 +136,6 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np, { struct device_node *chip_np; enum of_gpio_flags xlate_flags; - struct gpio_desc *desc; struct gg_data gg_data = { .flags = &xlate_flags, }; @@ -193,9 +192,7 @@ static struct gpio_desc *of_get_gpio_hog(struct device_node *np, if (name && of_property_read_string(np, "line-name", name)) *name = np->name; - desc = gg_data.out_gpio; - - return desc; + return gg_data.out_gpio; } /** @@ -338,7 +335,7 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc) EXPORT_SYMBOL(of_mm_gpiochip_remove); #ifdef CONFIG_PINCTRL -static void of_gpiochip_add_pin_range(struct gpio_chip *chip) +static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { struct device_node *np = chip->of_node; struct of_phandle_args pinspec; @@ -349,7 +346,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) struct property *group_names; if (!np) - return; + return 0; group_names = of_find_property(np, group_names_propname, NULL); @@ -361,11 +358,11 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) pctldev = of_pinctrl_get(pinspec.np); if (!pctldev) - break; + return -EPROBE_DEFER; if (pinspec.args[2]) { if (group_names) { - ret = of_property_read_string_index(np, + of_property_read_string_index(np, group_names_propname, index, &name); if (strlen(name)) { @@ -381,7 +378,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) pinspec.args[1], pinspec.args[2]); if (ret) - break; + return ret; } else { /* npins == 0: special range */ if (pinspec.args[1]) { @@ -411,32 +408,41 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) ret = gpiochip_add_pingroup_range(chip, pctldev, pinspec.args[0], name); if (ret) - break; + return ret; } } + + return 0; } #else -static void of_gpiochip_add_pin_range(struct gpio_chip *chip) {} +static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; } #endif -void of_gpiochip_add(struct gpio_chip *chip) +int of_gpiochip_add(struct gpio_chip *chip) { + int status; + if ((!chip->of_node) && (chip->dev)) chip->of_node = chip->dev->of_node; if (!chip->of_node) - return; + return 0; if (!chip->of_xlate) { chip->of_gpio_n_cells = 2; chip->of_xlate = of_gpio_simple_xlate; } - of_gpiochip_add_pin_range(chip); + status = of_gpiochip_add_pin_range(chip); + if (status) + return status; + of_node_get(chip->of_node); of_gpiochip_scan_hogs(chip); + + return 0; } void of_gpiochip_remove(struct gpio_chip *chip) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index bf4bd1d12..5db344555 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction); */ static int gpiochip_add_to_list(struct gpio_chip *chip) { - struct list_head *pos = &gpio_chips; + struct list_head *pos; struct gpio_chip *_chip; int err = 0; @@ -287,7 +287,13 @@ int gpiochip_add(struct gpio_chip *chip) INIT_LIST_HEAD(&chip->pin_ranges); #endif - of_gpiochip_add(chip); + if (!chip->owner && chip->dev && chip->dev->driver) + chip->owner = chip->dev->driver->owner; + + status = of_gpiochip_add(chip); + if (status) + goto err_remove_chip; + acpi_gpiochip_add(chip); status = gpiochip_sysfs_register(chip); @@ -443,8 +449,8 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, * 
The parent irqchip is already using the chip_data for this * irqchip, so our callbacks simply use the handler_data. */ - irq_set_handler_data(parent_irq, gpiochip); - irq_set_chained_handler(parent_irq, parent_handler); + irq_set_chained_handler_and_data(parent_irq, parent_handler, + gpiochip); gpiochip->irq_parent = parent_irq; } @@ -456,12 +462,6 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, } EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); -/* - * This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. - */ -static struct lock_class_key gpiochip_irq_lock_class; - /** * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip * @d: the irqdomain used by this irqchip @@ -478,16 +478,17 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, struct gpio_chip *chip = d->host_data; irq_set_chip_data(irq, chip); - irq_set_lockdep_class(irq, &gpiochip_irq_lock_class); + /* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. + */ + irq_set_lockdep_class(irq, chip->lock_key); irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler); /* Chips that can sleep need nested thread handlers */ if (chip->can_sleep && !chip->irq_not_threaded) irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else irq_set_noprobe(irq); -#endif + /* * No set-up of the hardware will happen if IRQ_TYPE_NONE * is passed as default type. @@ -502,9 +503,6 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { struct gpio_chip *chip = d->host_data; -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif if (chip->can_sleep) irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); @@ -522,10 +520,14 @@ static int gpiochip_irq_reqres(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + if (!try_module_get(chip->owner)) + return -ENODEV; + if (gpiochip_lock_as_irq(chip, d->hwirq)) { chip_err(chip, "unable to lock HW IRQ %lu for IRQ\n", d->hwirq); + module_put(chip->owner); return -EINVAL; } return 0; @@ -536,6 +538,7 @@ static void gpiochip_irq_relres(struct irq_data *d) struct gpio_chip *chip = irq_data_get_irq_chip_data(d); gpiochip_unlock_as_irq(chip, d->hwirq); + module_put(chip->owner); } static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) @@ -584,6 +587,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * @handler: the irq handler to use (often a predefined irq core function) * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE * to have the core avoid setting up any default type in the hardware. + * @lock_key: lockdep class * * This function closely associates a certain irqchip with a certain * gpiochip, providing an irq domain to translate the local IRQs to @@ -599,11 +603,12 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * the pins on the gpiochip can generate a unique IRQ. Everything else * need to be open coded. 
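Below, gpiochip_irqchip_add() is renamed _gpiochip_irqchip_add() and grows a struct lock_class_key * parameter, replacing the single file-static gpiochip_irq_lock_class removed above, so each registration site gets its own lockdep class. The wrapper macro supplying the key is not part of this hunk; presumably it instantiates a static key per call site, roughly:

/* Assumed wrapper (the real one lives in a header outside this excerpt) */
#define gpiochip_irqchip_add(gc, irqchip, first_irq, handler, type)	\
({									\
	static struct lock_class_key _key;				\
	_gpiochip_irqchip_add(gc, irqchip, first_irq, handler, type,	\
			      &_key);					\
})

A static local inside a statement-expression macro yields one key per expansion, which gives lockdep the per-call-site granularity it needs to avoid false recursion reports between a GPIO irqchip and its parent.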
*/ -int gpiochip_irqchip_add(struct gpio_chip *gpiochip, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type) +int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type, + struct lock_class_key *lock_key) { struct device_node *of_node; unsigned int offset; @@ -629,6 +634,7 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, gpiochip->irq_handler = handler; gpiochip->irq_default_type = type; gpiochip->to_irq = gpiochip_to_irq; + gpiochip->lock_key = lock_key; gpiochip->irqdomain = irq_domain_add_simple(of_node, gpiochip->ngpio, first_irq, &gpiochip_domain_ops, gpiochip); @@ -636,8 +642,16 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, gpiochip->irqchip = NULL; return -EINVAL; } - irqchip->irq_request_resources = gpiochip_irq_reqres; - irqchip->irq_release_resources = gpiochip_irq_relres; + + /* + * It is possible for a driver to override this, but only if the + * alternative functions are both implemented. + */ + if (!irqchip->irq_request_resources && + !irqchip->irq_release_resources) { + irqchip->irq_request_resources = gpiochip_irq_reqres; + irqchip->irq_release_resources = gpiochip_irq_relres; + } /* * Prepare the mapping since the irqchip shall be orthogonal to @@ -658,7 +672,7 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, return 0; } -EXPORT_SYMBOL_GPL(gpiochip_irqchip_add); +EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add); #else /* CONFIG_GPIOLIB_IRQCHIP */ @@ -671,7 +685,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {} /** * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping * @chip: the gpiochip to add the range for - * @pinctrl: the dev_name() of the pin controller to map to + * @pctldev: the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_group: name of the pin group inside the pin controller */ @@ -1160,15 +1174,16 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low); * that the GPIO was actually requested. */ -static bool _gpiod_get_raw_value(const struct gpio_desc *desc) +static int _gpiod_get_raw_value(const struct gpio_desc *desc) { struct gpio_chip *chip; - bool value; int offset; + int value; chip = desc->chip; offset = gpio_chip_hwgpio(desc); - value = chip->get ? chip->get(chip, offset) : false; + value = chip->get ? chip->get(chip, offset) : -EIO; + value = value < 0 ? value : !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } @@ -1178,7 +1193,7 @@ static bool _gpiod_get_raw_value(const struct gpio_desc *desc) * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding - * its ACTIVE_LOW status. + * its ACTIVE_LOW status, or negative errno on failure. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. @@ -1198,7 +1213,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value); * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into - * account. + * account, or negative errno on failure. * * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. 
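_gpiod_get_raw_value() changes type from bool to int so that a chip without a ->get() callback reports -EIO instead of silently reading as false, and the hunks below make gpiod_get_value() and gpiod_get_value_cansleep() propagate that error before applying the active-low inversion. Callers that treated the result as a plain boolean should now check the sign; an illustrative consumer:

#include <linux/errno.h>
#include <linux/gpio/consumer.h>

static int demo_read_ready_pin(struct gpio_desc *ready)
{
	int val = gpiod_get_value(ready);

	if (val < 0)		/* e.g. -EIO: chip cannot read the line */
		return val;

	return val ? 0 : -EBUSY;	/* hypothetical caller policy */
}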
@@ -1212,6 +1227,9 @@ int gpiod_get_value(const struct gpio_desc *desc) WARN_ON(desc->chip->can_sleep); value = _gpiod_get_raw_value(desc); + if (value < 0) + return value; + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; @@ -1534,7 +1552,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); * @desc: gpio whose value will be returned * * Return the GPIO's raw value, i.e. the value of the physical line disregarding - * its ACTIVE_LOW status. + * its ACTIVE_LOW status, or negative errno on failure. * * This function is to be called from contexts that can sleep. */ @@ -1552,7 +1570,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep); * @desc: gpio whose value will be returned * * Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into - * account. + * account, or negative errno on failure. * * This function is to be called from contexts that can sleep. */ @@ -1565,6 +1583,9 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc) return 0; value = _gpiod_get_raw_value(desc); + if (value < 0) + return value; + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; @@ -1672,6 +1693,19 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table) mutex_unlock(&gpio_lookup_lock); } +/** + * gpiod_remove_lookup_table() - unregister GPIO device consumers + * @table: table of consumers to unregister + */ +void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) +{ + mutex_lock(&gpio_lookup_lock); + + list_del(&table->list); + + mutex_unlock(&gpio_lookup_lock); +} + static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, unsigned int idx, enum gpio_lookup_flags *flags) @@ -1894,12 +1928,12 @@ EXPORT_SYMBOL_GPL(gpiod_count); * dev, -ENOENT if no GPIO has been assigned to the requested function, or * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ -struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id, +struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) { return gpiod_get_index(dev, con_id, 0, flags); } -EXPORT_SYMBOL_GPL(__gpiod_get); +EXPORT_SYMBOL_GPL(gpiod_get); /** * gpiod_get_optional - obtain an optional GPIO for a given GPIO function @@ -1911,13 +1945,13 @@ EXPORT_SYMBOL_GPL(__gpiod_get); * the requested function it will return NULL. This is convenient for drivers * that need to handle optional GPIOs. */ -struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return gpiod_get_index_optional(dev, con_id, 0, flags); } -EXPORT_SYMBOL_GPL(__gpiod_get_optional); +EXPORT_SYMBOL_GPL(gpiod_get_optional); /** @@ -1974,7 +2008,7 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, * requested function and/or index, or another IS_ERR() code if an error * occurred while trying to acquire the GPIO. 
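The export renames below drop the double-underscore prefixes (__gpiod_get() becomes gpiod_get(), and so on), removing the old macro indirection; consumer code keeps the same shape. A typical, hypothetical user of the now directly exported API:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_probe_gpios(struct device *dev)
{
	struct gpio_desc *reset;

	/* returns NULL, not an error, when no "reset" GPIO is mapped */
	reset = gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset)
		gpiod_set_value(reset, 1);	/* assert reset */

	return 0;
}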
*/ -struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, +struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags) @@ -2023,7 +2057,7 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, return desc; } -EXPORT_SYMBOL_GPL(__gpiod_get_index); +EXPORT_SYMBOL_GPL(gpiod_get_index); /** * fwnode_get_named_gpiod - obtain a GPIO from firmware node @@ -2092,7 +2126,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); * specified index was assigned to the requested function it will return NULL. * This is convenient for drivers that need to handle optional GPIOs. */ -struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) @@ -2107,7 +2141,7 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, return desc; } -EXPORT_SYMBOL_GPL(__gpiod_get_index_optional); +EXPORT_SYMBOL_GPL(gpiod_get_index_optional); /** * gpiod_hog - Hog the specified GPIO desc given the provided flags diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c46ca311d..1a0a8df2e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER select FB select FRAMEBUFFER_CONSOLE if !EXPERT select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE + select FB_SYS_FOPS + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT help FBDEV helpers for KMS drivers. +config DRM_FBDEV_EMULATION + bool "Enable legacy fbdev support for your modesetting driver" + depends on DRM + select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER + default y + help + Choose this option if you have a need for the legacy fbdev + support. Note that this support also provides the linux console + support on top of your modesetting driver. + + If in doubt, say "Y". + config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" depends on DRM_KMS_HELPER @@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER source "drivers/gpu/drm/i2c/Kconfig" -source "drivers/gpu/drm/bridge/Kconfig" - config DRM_TDFX tristate "3dfx Banshee/Voodoo3+" depends on DRM && PCI @@ -110,6 +128,7 @@ config DRM_RADEON select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT select INTERVAL_TREE help Choose this option if you have an ATI Radeon graphics card. There @@ -133,6 +152,7 @@ config DRM_AMDGPU select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT select INTERVAL_TREE help Choose this option if you have a recent AMD Radeon graphics card. 
@@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig" source "drivers/gpu/drm/msm/Kconfig" +source "drivers/gpu/drm/fsl-dcu/Kconfig" + source "drivers/gpu/drm/tegra/Kconfig" source "drivers/gpu/drm/panel/Kconfig" +source "drivers/gpu/drm/bridge/Kconfig" + source "drivers/gpu/drm/sti/Kconfig" source "drivers/gpu/drm/amd/amdkfd/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 5713d0534..45e771984 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o -drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o +drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ +obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 616dfd4a1..04c270757 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -3,7 +3,9 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \ - -Idrivers/gpu/drm/amd/include + -Idrivers/gpu/drm/amd/include \ + -Idrivers/gpu/drm/amd/amdgpu \ + -Idrivers/gpu/drm/amd/scheduler amdgpu-y := amdgpu_drv.o @@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ - ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o + ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ + amdgpu_amdkfd_gfx_v7.o amdgpu-y += \ vi.o @@ -43,6 +46,7 @@ amdgpu-y += \ amdgpu_dpm.o \ cz_smc.o cz_dpm.o \ tonga_smc.o tonga_dpm.o \ + fiji_smc.o fiji_dpm.o \ iceland_smc.o iceland_dpm.o # add DCE block @@ -71,6 +75,20 @@ amdgpu-y += \ amdgpu_vce.o \ vce_v3_0.o +# add amdkfd interfaces +amdgpu-y += \ + amdgpu_amdkfd.o \ + amdgpu_amdkfd_gfx_v8.o + +# add cgs +amdgpu-y += amdgpu_cgs.o + +# GPU scheduler +amdgpu-y += \ + ../scheduler/gpu_scheduler.o \ + ../scheduler/sched_fence.o \ + amdgpu_sched.o + amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f7b49d5ce..0d13e6368 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -42,17 +42,19 @@ #include #include +#include #include #include #include "amd_shared.h" -#include "amdgpu_family.h" #include "amdgpu_mode.h" #include "amdgpu_ih.h" #include "amdgpu_irq.h" #include "amdgpu_ucode.h" #include "amdgpu_gds.h" +#include "gpu_scheduler.h" + /* * Modules parameters. 
*/ @@ -77,7 +79,12 @@ extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; +extern int amdgpu_enable_scheduler; +extern int amdgpu_sched_jobs; +extern int amdgpu_sched_hw_submission; +extern int amdgpu_enable_semaphores; +#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ @@ -92,6 +99,9 @@ extern int amdgpu_vm_block_size; #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 2 +/* max number of IP instances */ +#define AMDGPU_MAX_SDMA_INSTANCES 2 + /* number of hw syncs before falling back on blocking */ #define AMDGPU_NUM_SYNCS 4 @@ -177,7 +187,9 @@ struct amdgpu_vm; struct amdgpu_ring; struct amdgpu_semaphore; struct amdgpu_cs_parser; +struct amdgpu_job; struct amdgpu_irq_src; +struct amdgpu_fpriv; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -239,7 +251,7 @@ struct amdgpu_buffer_funcs { unsigned copy_num_dw; /* used for buffer migration */ - void (*emit_copy_buffer)(struct amdgpu_ring *ring, + void (*emit_copy_buffer)(struct amdgpu_ib *ib, /* src addr in bytes */ uint64_t src_offset, /* dst addr in bytes */ @@ -254,7 +266,7 @@ struct amdgpu_buffer_funcs { unsigned fill_num_dw; /* used for buffer clearing */ - void (*emit_fill_buffer)(struct amdgpu_ring *ring, + void (*emit_fill_buffer)(struct amdgpu_ib *ib, /* value to write to memory */ uint32_t src_data, /* dst addr in bytes */ @@ -332,6 +344,8 @@ struct amdgpu_ring_funcs { int (*test_ring)(struct amdgpu_ring *ring); int (*test_ib)(struct amdgpu_ring *ring); bool (*is_lockup)(struct amdgpu_ring *ring); + /* insert NOP packets */ + void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); }; /* @@ -381,10 +395,10 @@ struct amdgpu_fence_driver { uint64_t sync_seq[AMDGPU_MAX_RINGS]; atomic64_t last_seq; bool initialized; - bool delayed_irq; struct amdgpu_irq_src *irq_src; unsigned irq_type; struct delayed_work lockup_work; + wait_queue_head_t fence_queue; }; /* some special values for the owner field */ @@ -419,24 +433,24 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); -void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); +void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); +void amdgpu_fence_driver_resume(struct amdgpu_device *adev); int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, struct amdgpu_fence **fence); -int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, - uint64_t seq, struct amdgpu_fence **fence); void amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); -bool amdgpu_fence_signaled(struct amdgpu_fence *fence); -int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible); -int amdgpu_fence_wait_any(struct amdgpu_device *adev, - struct amdgpu_fence **fences, - bool intr); +signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, + struct fence **array, + uint32_t count, + bool intr, + signed long t); struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); void 
amdgpu_fence_unref(struct amdgpu_fence **fence); @@ -481,7 +495,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a, return a->seq < b->seq; } -int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, +int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, void *owner, struct amdgpu_fence **fence); /* @@ -509,7 +523,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct amdgpu_fence **fence); + struct fence **fence); int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); struct amdgpu_bo_list_entry { @@ -532,14 +546,16 @@ struct amdgpu_bo_va_mapping { struct amdgpu_bo_va { /* protected by bo being reserved */ struct list_head bo_list; - uint64_t addr; - struct amdgpu_fence *last_pt_update; + struct fence *last_pt_update; unsigned ref_count; - /* protected by vm mutex */ - struct list_head mappings; + /* protected by vm mutex and spinlock */ struct list_head vm_status; + /* mappings for this bo_va */ + struct list_head invalids; + struct list_head valids; + /* constant after initialization */ struct amdgpu_vm *vm; struct amdgpu_bo *bo; @@ -643,7 +659,7 @@ struct amdgpu_sa_bo { struct amdgpu_sa_manager *manager; unsigned soffset; unsigned eoffset; - struct amdgpu_fence *fence; + struct fence *fence; }; /* @@ -685,7 +701,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, struct amdgpu_semaphore *semaphore); void amdgpu_semaphore_free(struct amdgpu_device *adev, struct amdgpu_semaphore **semaphore, - struct amdgpu_fence *fence); + struct fence *fence); /* * Synchronization @@ -693,20 +709,23 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev, struct amdgpu_sync { struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS]; - struct amdgpu_fence *last_vm_update; + DECLARE_HASHTABLE(fences, 4); + struct fence *last_vm_update; }; void amdgpu_sync_create(struct amdgpu_sync *sync); -void amdgpu_sync_fence(struct amdgpu_sync *sync, - struct amdgpu_fence *fence); +int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct fence *f); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); int amdgpu_sync_rings(struct amdgpu_sync *sync, struct amdgpu_ring *ring); +struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); +int amdgpu_sync_wait(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct amdgpu_fence *fence); + struct fence *fence); /* * GART structures, functions & helpers @@ -821,7 +840,9 @@ struct amdgpu_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_rbo; - struct fence *fence; + struct fence *excl; + unsigned shared_count; + struct fence **shared; }; @@ -844,6 +865,8 @@ struct amdgpu_ib { uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; uint32_t flags; + /* resulting sequence number */ + uint64_t sequence; }; enum amdgpu_ring_type { @@ -854,11 +877,23 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_VCE }; +extern struct amd_sched_backend_ops amdgpu_sched_ops; + +int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ibs, + unsigned num_ibs, + int (*free_job)(struct amdgpu_job *), + void *owner, + struct fence **fence); + struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; 
struct amdgpu_fence_driver fence_drv; + struct amd_gpu_scheduler sched; + spinlock_t fence_lock; struct mutex *ring_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; @@ -892,6 +927,7 @@ struct amdgpu_ring { struct amdgpu_ctx *current_ctx; enum amdgpu_ring_type type; char name[16]; + bool is_pte_ring; }; /* @@ -933,7 +969,7 @@ struct amdgpu_vm_id { unsigned id; uint64_t pd_gpu_addr; /* last flushed PD/PT update */ - struct amdgpu_fence *flushed_updates; + struct fence *flushed_updates; /* last use of vmid */ struct amdgpu_fence *last_id_use; }; @@ -943,18 +979,22 @@ struct amdgpu_vm { struct rb_root va; - /* protecting invalidated and freed */ + /* protecting invalidated */ spinlock_t status_lock; /* BOs moved, but not yet updated in the PT */ struct list_head invalidated; - /* BOs freed, but not yet updated in the PT */ + /* BOs cleared in the PT because of a move */ + struct list_head cleared; + + /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; /* contains the page directory */ struct amdgpu_bo *page_directory; unsigned max_pde_used; + struct fence *page_directory_fence; /* array of page tables, one for each page directory entry */ struct amdgpu_vm_pt *page_tables; @@ -983,27 +1023,47 @@ struct amdgpu_vm_manager { * context related structures */ -struct amdgpu_ctx_state { - uint64_t flags; - uint32_t hangs; +#define AMDGPU_CTX_MAX_CS_PENDING 16 + +struct amdgpu_ctx_ring { + uint64_t sequence; + struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING]; + struct amd_sched_entity entity; }; struct amdgpu_ctx { - /* call kref_get()before CS start and kref_put() after CS fence signaled */ - struct kref refcount; - struct amdgpu_fpriv *fpriv; - struct amdgpu_ctx_state state; - uint32_t id; - unsigned reset_counter; + struct kref refcount; + struct amdgpu_device *adev; + unsigned reset_counter; + spinlock_t ring_lock; + struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; }; struct amdgpu_ctx_mgr { - struct amdgpu_device *adev; - struct idr ctx_handles; - /* lock for IDR system */ - struct mutex lock; + struct amdgpu_device *adev; + struct mutex lock; + /* protected by lock */ + struct idr ctx_handles; }; +int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, + struct amdgpu_ctx *ctx); +void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); + +struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); +int amdgpu_ctx_put(struct amdgpu_ctx *ctx); + +uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct fence *fence); +struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq); + +int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); + /* * file private structure */ @@ -1012,7 +1072,7 @@ struct amdgpu_fpriv { struct amdgpu_vm vm; struct mutex bo_list_lock; struct idr bo_list_handles; - struct amdgpu_ctx_mgr ctx_mgr; + struct amdgpu_ctx_mgr ctx_mgr; }; /* @@ -1142,8 +1202,6 @@ struct amdgpu_gfx { struct amdgpu_irq_src priv_inst_irq; /* gfx status */ uint32_t gfx_current_status; - /* sync signal for const engine */ - unsigned ce_sync_offs; /* ce ram size*/ unsigned ce_ram_size; }; @@ -1160,6 +1218,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev); void amdgpu_ring_free_size(struct amdgpu_ring *ring); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); +void 
amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); @@ -1207,6 +1266,18 @@ struct amdgpu_cs_parser { struct amdgpu_user_fence uf; }; +struct amdgpu_job { + struct amd_sched_job base; + struct amdgpu_device *adev; + struct amdgpu_ib *ibs; + uint32_t num_ibs; + struct mutex job_lock; + struct amdgpu_user_fence uf; + int (*free_job)(struct amdgpu_job *job); +}; +#define to_amdgpu_job(sched_job) \ + container_of((sched_job), struct amdgpu_job, base) + static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) { return p->ibs[ib_idx].ptr[idx]; @@ -1583,6 +1654,7 @@ struct amdgpu_pm { u8 fan_max_rpm; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct amdgpu_dpm dpm; const struct firmware *fw; /* SMC firmware */ uint32_t fw_version; @@ -1601,7 +1673,6 @@ struct amdgpu_uvd { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - void *saved_bo; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; @@ -1645,6 +1716,7 @@ struct amdgpu_sdma { uint32_t feature_version; struct amdgpu_ring ring; + bool burst_nop; }; /* @@ -1849,17 +1921,12 @@ struct amdgpu_atcs { struct amdgpu_atcs_functions functions; }; -int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv, - uint32_t *id,uint32_t flags); -int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, - uint32_t id); - -void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv); -struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); -int amdgpu_ctx_put(struct amdgpu_ctx *ctx); +/* + * CGS + */ +void *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(void *cgs_device); -extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); /* * Core structure, functions and helpers. @@ -1883,7 +1950,7 @@ struct amdgpu_device { struct rw_semaphore exclusive_lock; /* ASIC */ - enum amdgpu_asic_type asic_type; + enum amd_asic_type asic_type; uint32_t family; uint32_t rev_id; uint32_t external_rev_id; @@ -1976,7 +2043,6 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - wait_queue_head_t fence_queue; unsigned fence_context; struct mutex ring_lock; unsigned num_rings; @@ -1999,7 +2065,7 @@ struct amdgpu_device { struct amdgpu_gfx gfx; /* sdma */ - struct amdgpu_sdma sdma[2]; + struct amdgpu_sdma sdma[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_irq_src sdma_trap_irq; struct amdgpu_irq_src sdma_illegal_inst_irq; @@ -2025,6 +2091,12 @@ struct amdgpu_device { /* tracking pinned memory */ u64 vram_pin_size; u64 gart_pin_size; + + /* amdkfd interface */ + struct kfd_dev *kfd; + + /* kernel conext for IB submission */ + struct amdgpu_ctx kernel_ctx; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2132,6 +2204,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) ring->ring_free_dw--; } +static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + int i; + + for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++) + if (&adev->sdma[i].ring == ring) + break; + + if (i < AMDGPU_MAX_SDMA_INSTANCES) + return &adev->sdma[i]; + else + return NULL; +} + /* * ASICs macro. 
 */
@@ -2183,8 +2270,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2212,6 +2299,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
@@ -2275,11 +2368,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					       struct amdgpu_vm *vm,
 					       struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2309,7 +2402,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);

 /*
 * functions used by amdgpu_encoder.c
 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 000000000..84d68d658
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amd_shared.h"
+#include
+#include "amdgpu.h"
+#include
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL)
+		return false;
+#endif
+	return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+	switch (rdev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_KAVERI:
+		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+		break;
+#endif
+	case CHIP_CARRIZO:
+		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+		break;
+	default:
+		return false;
+	}
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL) {
+		kfd2kgd = NULL;
+		return false;
+	}
+
+	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		symbol_put(kgd2kfd_init);
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+
+		return false;
+	}
+
+	return true;
+#elif defined(CONFIG_HSA_AMD)
+	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+		return false;
+	}
+
+	return true;
+#else
+	kfd2kgd = NULL;
+	return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+	if (kgd2kfd) {
+		kgd2kfd->exit();
+		symbol_put(kgd2kfd_init);
+	}
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+	if (kgd2kfd)
+		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+					rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		struct kgd2kfd_shared_resources gpu_resources = {
+			.compute_vmid_bitmap = 0xFF00,
+
+			.first_compute_pipe = 1,
+			.compute_pipe_count = 4 - 1,
+		};
+
+		amdgpu_doorbell_get_kfd_info(rdev,
+				&gpu_resources.doorbell_physical_address,
+				&gpu_resources.doorbell_aperture_size,
+				&gpu_resources.doorbell_start_offset);
+
+		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+	}
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		kgd2kfd->device_exit(rdev->kfd);
+		rdev->kfd = NULL;
+	}
+}
+
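[Editor's note: amdgpu_amdkfd_device_init() above hands resources to amdkfd; compute_vmid_bitmap = 0xFF00 reserves VMIDs 8-15 for compute queues while graphics keeps 0-7. A self-contained sketch of how such a bitmap decodes; kfd_owns_vmid is an illustrative name, not part of this patch.]

#include <stdbool.h>
#include <stdint.h>

static bool kfd_owns_vmid(uint32_t compute_vmid_bitmap, unsigned int vmid)
{
	/* With 0xFF00 this is true exactly for vmid 8..15. */
	return vmid < 32 && (compute_vmid_bitmap & (1u << vmid));
}
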
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+		const void *ih_ring_entry)
+{
+	if (rdev->kfd)
+		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd)
+		kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+	int r = 0;
+
+	if (rdev->kfd)
+		r = kgd2kfd->resume(rdev->kfd);
+
+	return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+	switch (p) {
+	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+	default: return AMDGPU_GEM_DOMAIN_GTT;
+	}
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+	int r;
+
+	BUG_ON(kgd == NULL);
+	BUG_ON(gpu_addr == NULL);
+	BUG_ON(cpu_ptr == NULL);
+
+	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+	if ((*mem) == NULL)
+		return -ENOMEM;
+
+	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
+	if (r) {
+		dev_err(rdev->dev,
+			"failed to allocate BO for amdkfd (%d)\n", r);
+		return r;
+	}
+
+	/* map the buffer */
+	r = amdgpu_bo_reserve((*mem)->bo, true);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+		goto allocate_mem_reserve_bo_failed;
+	}
+
+	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+				&(*mem)->gpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+		goto allocate_mem_pin_bo_failed;
+	}
+	*gpu_addr = (*mem)->gpu_addr;
+
+	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+	if (r) {
+		dev_err(rdev->dev,
+			"(%d) failed to map bo to kernel for amdkfd\n", r);
+		goto allocate_mem_kmap_bo_failed;
+	}
+	*cpu_ptr = (*mem)->cpu_ptr;
+
+	amdgpu_bo_unreserve((*mem)->bo);
+
+	return 0;
+
+allocate_mem_kmap_bo_failed:
+	amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+	amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+	amdgpu_bo_unref(&(*mem)->bo);
+
+	return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+	BUG_ON(mem == NULL);
+
+	amdgpu_bo_reserve(mem->bo, true);
+	amdgpu_bo_kunmap(mem->bo);
+	amdgpu_bo_unpin(mem->bo);
+	amdgpu_bo_unreserve(mem->bo);
+	amdgpu_bo_unref(&(mem->bo));
+	kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev =
+		(struct amdgpu_device *)kgd;
+
+	BUG_ON(kgd == NULL);
+
+	return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	if (rdev->asic_funcs->get_gpu_clock_counter)
+		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+	return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	/* The sclk is in quanta of 10kHz */
+	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include
+#include
+
+struct amdgpu_device;
+
+struct kgd_mem {
+	struct amdgpu_bo *bo;
+	uint64_t gpu_addr;
+	void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+			const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 000000000..dd2037bc0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC	(4)
+
+enum {
+	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
+	MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+	ADDRESS_WATCH_REG_ADDR_HI = 0,
+	ADDRESS_WATCH_REG_ADDR_LO,
+	ADDRESS_WATCH_REG_CNTL,
+	ADDRESS_WATCH_REG_MAX
+};
+
+/* not defined in the CI/KV reg file */
+enum {
+	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+	/* extend the mask to 26 bits to match the low address field */
+	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+	struct {
+		uint32_t mask:24;
+		uint32_t vmid:4;
+		uint32_t atc:1;
+		uint32_t mode:2;
+		uint32_t valid:1;
+	} bitfields, bits;
+	uint32_t u32All;
+	signed int i32All;
+	float f32All;
+};
+
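[Editor's note: the union above overlays the TCP_WATCH?_CNTL register layout (24-bit address mask, 4-bit vmid, atc, 2-bit mode, valid). A small sketch of composing a control word with it, matching the disable/program/enable sequence used further down; the vmid value is illustrative.]

static uint32_t make_watch_cntl(uint8_t vmid)
{
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK; /* low 24 bits */
	cntl.bitfields.vmid = vmid;	/* e.g. one of the compute VMIDs */
	cntl.bitfields.atc = 1;
	cntl.bitfields.valid = 0;	/* program ADDR_HI/ADDR_LO first, then set valid */
	return cntl.u32All;
}
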
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
+		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+		uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+		uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+		unsigned int timeout, uint32_t pipe_id,
+		uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+		unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+		unsigned int watch_point_id,
+		uint32_t cntl_val,
+		uint32_t addr_hi,
+		uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+		uint32_t gfx_index_val,
+		uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+		unsigned int watch_point_id,
+		unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
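[Editor's note: acquire_queue() pre-increments pipe_id before the divide, so both the MEC index and the pipe within it are derived from pipe_id + 1. A short trace with CIK_PIPE_PER_MEC = 4:]

/* pipe_id 0 -> ++ -> 1: mec = 1/4 + 1 = 1, pipe = 1 % 4 = 1
 * pipe_id 3 -> ++ -> 4: mec = 4/4 + 1 = 2, pipe = 4 % 4 = 0
 * i.e. hardware pipe 0 of MEC1 is skipped, consistent with
 * first_compute_pipe = 1 in the shared resources set up earlier.
 */
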
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished and the
+	 * SW cleared it. So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+					ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_VMID, 0);
+	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	uint32_t retval;
+
+	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+	pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+	return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+	return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t wptr_shadow, is_wptr_shadow_valid;
+	struct cik_mqd *m;
+
+	m = get_mqd(mqd);
+
+	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+	if (is_wptr_shadow_valid)
+		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+			m->sdma_rlc_virtual_addr);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+			m->sdma_rlc_rb_base);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdma_rlc_rb_base_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdma_rlc_rb_rptr_addr_lo);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdma_rlc_rb_rptr_addr_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+			m->sdma_rlc_doorbell);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+			m->sdma_rlc_rb_cntl);
+
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption time out (%dms)\n",
+				temp);
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+	unsigned int i;
+
+	cntl.u32All = 0;
+
+	cntl.bitfields.valid = 0;
+	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+	cntl.bitfields.atc = 1;
+
+	/* Turning off this address until we set all the registers */
+	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+			ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+
+	cntl.u32All = cntl_val;
+
+	/* Turning off this watch point until we set all the registers */
+	cntl.bitfields.valid = 0;
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+	/* Enable the watch point */
+	cntl.bitfields.valid = 1;
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	/* Restore the GRBM_GFX_INDEX register */
+
+	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
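[Editor's note: write_vmid_invalidate_request() above exposes the per-VMID TLB flush; VM_INVALIDATE_REQUEST takes a bitmask with one bit per VMID. A hedged sketch batching it over the compute VMIDs; flush_compute_vmids is a made-up helper, not part of this patch.]

static void flush_compute_vmids(struct kgd_dev *kgd)
{
	uint8_t vmid;

	/* VMIDs 8..15 are the compute ones per compute_vmid_bitmap = 0xFF00. */
	for (vmid = 8; vmid < 16; vmid++)
		write_vmid_invalidate_request(kgd, vmid);
}
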
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 000000000..dfd1d503b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC	(4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+		uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+		uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+		unsigned int timeout, uint32_t pipe_id,
+		uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+		unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+		unsigned int watch_point_id,
+		uint32_t cntl_val,
+		uint32_t addr_hi,
+		uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+		uint32_t gfx_index_val,
+		uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+		unsigned int watch_point_id,
+		unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished
+	 * and the SW cleared it.
+	 * So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+					ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct vi_mqd *m;
+	uint32_t shadow_wptr, valid_wptr;
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	m = get_mqd(mqd);
+
+	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	if (valid_wptr > 0)
+		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption time out (%dms)\n",
+				temp);
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	return 0;
+}
+
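[Editor's note: kgd_hqd_destroy() above, like its CIK counterpart, polls CP_HQD_ACTIVE in 20 ms steps until the dequeue request takes effect or the millisecond budget runs out. The pattern reduced to a hedged skeleton; read_status is a placeholder for the RREG32 read and the mask is illustrative.]

static int poll_until_set(uint32_t (*read_status)(void), uint32_t mask,
			  unsigned int timeout_ms)
{
	while (!(read_status() & mask)) {
		if (timeout_ms == 0)
			return -ETIME;	/* budget exhausted */
		msleep(20);		/* kernel helper, as in the code above */
		timeout_ms -= 20;
	}
	return 0;
}
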
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+			INSTANCE_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+			SH_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+			SE_BROADCAST_WRITES, 1);
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+						adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a588371d..9416e0f5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 	/* disp clock */
 	adev->clock.default_dispclk =
 		le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-	if (adev->clock.default_dispclk == 0)
-		adev->clock.default_dispclk = 54000; /* 540 Mhz */
+	/* set a reasonable default for DP */
+	if (adev->clock.default_dispclk < 53900) {
+		DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+			 adev->clock.default_dispclk / 100);
+		adev->clock.default_dispclk = 60000;
+	}
 	adev->clock.dp_extclk =
 		le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 	adev->clock.current_dispclk = adev->clock.default_dispclk;
@@ -897,7 +901,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
 	if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 	    (id == ASIC_INTERNAL_MEMORY_SS))
 		ss->rate /= 100;
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
 	return true;
 }
@@ -1058,7 +1062,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
 	SET_MEMORY_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);

-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;

 	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a35..cd639c362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int i, r;

 	start_jiffies = jiffies;
@@ -42,17 +42,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
 		if (r)
 			goto exit_do_move;
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);

 exit_do_move:
 	if (fence)
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	return r;
 }
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	int time;

 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+			     NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+			     NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f6d..02add0a50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */

-	if (!(adev->flags & AMDGPU_IS_APU))
+	if (!(adev->flags & AMD_IS_APU))
 		if (!amdgpu_card_posted(adev))
 			return false;
@@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 	bool found = false;

 	/* ATRM is for the discrete card only */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return false;

 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
 		return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 000000000..f01b00df3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include "amdgpu.h"
+#include "cgs_linux.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+
+
+struct amdgpu_cgs_device {
+	struct cgs_device base;
+	struct amdgpu_device *adev;
+};
+
+#define CGS_FUNC_ADEV \
+	struct amdgpu_device *adev =					\
+		((struct amdgpu_cgs_device *)cgs_device)->adev
+
+static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t *mc_start, uint64_t *mc_size,
+				   uint64_t *mem_size)
+{
+	CGS_FUNC_ADEV;
+	switch(type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		*mc_start = 0;
+		*mc_size = adev->mc.visible_vram_size;
+		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		*mc_start = adev->mc.visible_vram_size;
+		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
+		*mem_size = *mc_size;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		*mc_start = adev->mc.gtt_start;
+		*mc_size = adev->mc.gtt_size;
+		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+				uint64_t size,
+				uint64_t min_offset, uint64_t max_offset,
+				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
+{
+	CGS_FUNC_ADEV;
+	int ret;
+	struct amdgpu_bo *bo;
+	struct page *kmem_page = vmalloc_to_page(kmem);
+	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
+	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
+	if (ret)
+		return ret;
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
+				       min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(bo);
+
+	*kmem_handle = (cgs_handle_t)bo;
+	return ret;
+}
+
+static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+				    enum cgs_gpu_mem_type type,
+				    uint64_t size, uint64_t align,
+				    uint64_t min_offset, uint64_t max_offset,
+				    cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	uint16_t flags = 0;
+	int ret = 0;
+	uint32_t domain = 0;
+	struct amdgpu_bo *obj;
+	struct ttm_placement placement;
+	struct ttm_place place;
+
+	if (min_offset > max_offset) {
+		BUG_ON(1);
+		return -EINVAL;
+	}
+
+	/* fail if the alignment is not a power of 2 */
+	if (((align != 1) && (align & (align - 1)))
+	    || size == 0 || align == 0)
+		return -EINVAL;
+
+
+	switch(type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (max_offset > adev->mc.real_vram_size)
+			return -EINVAL;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_VRAM;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			place.fpfn =
+				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
+			place.lpfn =
+				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
+			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_VRAM;
+		}
+
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+			TTM_PL_FLAG_UNCACHED;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	*handle = 0;
+
+	placement.placement = &place;
+	placement.num_placement = 1;
+	placement.busy_placement = &place;
+	placement.num_busy_placement = 1;
+
+	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
+					  true, domain, flags,
+					  NULL, &placement, NULL,
+					  &obj);
+	if (ret) {
+		DRM_ERROR("(%d) bo create failed\n", ret);
+		return ret;
+	}
+	*handle = (cgs_handle_t)obj;
+
+	return ret;
+}
+
+static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_kunmap(obj);
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   uint64_t *mcaddr)
+{
+	int r;
+	u64 min_offset, max_offset;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	WARN_ON_ONCE(obj->placement.num_placement > 1);
+
+	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
+	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
+
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+				     min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_unpin(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
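[Editor's note: the CGS buffer API above is handle-based: allocate, map into the GPU address space, optionally kmap for CPU access, then unmap and free. A hedged usage sketch of that lifecycle; error handling is trimmed, kmap/kunmap are defined just below, and the direct calls stand in for the cgs_ops dispatch table that normally routes them.]

static void cgs_buffer_lifecycle_example(void *cgs_device)
{
	cgs_handle_t handle;
	uint64_t mc_addr;
	void *cpu_ptr;

	/* 64 KiB of page-aligned, cacheable GTT memory, no placement bound. */
	amdgpu_cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
				 65536, PAGE_SIZE, 0, 0, &handle);
	amdgpu_cgs_gmap_gpu_mem(cgs_device, handle, &mc_addr); /* pin + GPU address */
	amdgpu_cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr); /* CPU mapping */
	/* ... use cpu_ptr and mc_addr ... */
	amdgpu_cgs_kunmap_gpu_mem(cgs_device, handle);
	amdgpu_cgs_gunmap_gpu_mem(cgs_device, handle);
	amdgpu_cgs_free_gpu_mem(cgs_device, handle);
}
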
amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle, + void **map) +{ + int r; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + r = amdgpu_bo_kmap(obj, map); + amdgpu_bo_unreserve(obj); + return r; +} + +static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) +{ + int r; + struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; + r = amdgpu_bo_reserve(obj, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_kunmap(obj); + amdgpu_bo_unreserve(obj); + return r; +} + +static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset) +{ + CGS_FUNC_ADEV; + return RREG32(offset); +} + +static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset, + uint32_t value) +{ + CGS_FUNC_ADEV; + WREG32(offset, value); +} + +static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device, + enum cgs_ind_reg space, + unsigned index) +{ + CGS_FUNC_ADEV; + switch (space) { + case CGS_IND_REG__MMIO: + return RREG32_IDX(index); + case CGS_IND_REG__PCIE: + return RREG32_PCIE(index); + case CGS_IND_REG__SMC: + return RREG32_SMC(index); + case CGS_IND_REG__UVD_CTX: + return RREG32_UVD_CTX(index); + case CGS_IND_REG__DIDT: + return RREG32_DIDT(index); + case CGS_IND_REG__AUDIO_ENDPT: + DRM_ERROR("audio endpt register access not implemented.\n"); + return 0; + } + WARN(1, "Invalid indirect register space"); + return 0; +} + +static void amdgpu_cgs_write_ind_register(void *cgs_device, + enum cgs_ind_reg space, + unsigned index, uint32_t value) +{ + CGS_FUNC_ADEV; + switch (space) { + case CGS_IND_REG__MMIO: + return WREG32_IDX(index, value); + case CGS_IND_REG__PCIE: + return WREG32_PCIE(index, value); + case CGS_IND_REG__SMC: + return WREG32_SMC(index, value); + case CGS_IND_REG__UVD_CTX: + return WREG32_UVD_CTX(index, value); + case CGS_IND_REG__DIDT: + return WREG32_DIDT(index, value); + case CGS_IND_REG__AUDIO_ENDPT: + DRM_ERROR("audio endpt register access not implemented.\n"); + return; + } + WARN(1, "Invalid indirect register space"); +} + +static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr) +{ + CGS_FUNC_ADEV; + uint8_t val; + int ret = pci_read_config_byte(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_byte error")) + return 0; + return val; +} + +static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr) +{ + CGS_FUNC_ADEV; + uint16_t val; + int ret = pci_read_config_word(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_word error")) + return 0; + return val; +} + +static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device, + unsigned addr) +{ + CGS_FUNC_ADEV; + uint32_t val; + int ret = pci_read_config_dword(adev->pdev, addr, &val); + if (WARN(ret, "pci_read_config_dword error")) + return 0; + return val; +} + +static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr, + uint8_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_byte(adev->pdev, addr, value); + WARN(ret, "pci_write_config_byte error"); +} + +static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr, + uint16_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_word(adev->pdev, addr, value); + WARN(ret, "pci_write_config_word error"); +} + +static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, + uint32_t value) +{ + CGS_FUNC_ADEV; + int ret = pci_write_config_dword(adev->pdev, addr, value); + WARN(ret, "pci_write_config_dword error"); +} + +static const void 
*amdgpu_cgs_atom_get_data_table(void *cgs_device, + unsigned table, uint16_t *size, + uint8_t *frev, uint8_t *crev) +{ + CGS_FUNC_ADEV; + uint16_t data_start; + + if (amdgpu_atom_parse_data_header( + adev->mode_info.atom_context, table, size, + frev, crev, &data_start)) + return (uint8_t*)adev->mode_info.atom_context->bios + + data_start; + + return NULL; +} + +static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table, + uint8_t *frev, uint8_t *crev) +{ + CGS_FUNC_ADEV; + + if (amdgpu_atom_parse_cmd_header( + adev->mode_info.atom_context, table, + frev, crev)) + return 0; + + return -EINVAL; +} + +static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table, + void *args) +{ + CGS_FUNC_ADEV; + + return amdgpu_atom_execute_table( + adev->mode_info.atom_context, table, args); +} + +static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request, + int active) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request, + enum cgs_clock clock, unsigned freq) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request, + enum cgs_engine engine, int powered) +{ + /* TODO */ + return 0; +} + + + +static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device, + enum cgs_clock clock, + struct cgs_clock_limits *limits) +{ + /* TODO */ + return 0; +} + +static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask, + const uint32_t *voltages) +{ + DRM_ERROR("not implemented"); + return -EPERM; +} + +struct cgs_irq_params { + unsigned src_id; + cgs_irq_source_set_func_t set; + cgs_irq_handler_func_t handler; + void *private_data; +}; + +static int cgs_set_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + struct cgs_irq_params *irq_params = + (struct cgs_irq_params *)src->data; + if (!irq_params) + return -EINVAL; + if (!irq_params->set) + return -EINVAL; + return irq_params->set(irq_params->private_data, + irq_params->src_id, + type, + (int)state); +} + +static int cgs_process_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct cgs_irq_params *irq_params = + (struct cgs_irq_params *)source->data; + if (!irq_params) + return -EINVAL; + if (!irq_params->handler) + return -EINVAL; + return irq_params->handler(irq_params->private_data, + irq_params->src_id, + entry->iv_entry); +} + +static const struct amdgpu_irq_src_funcs cgs_irq_funcs = { + .set = cgs_set_irq_state, + .process = cgs_process_irq, +}; + +static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id, + unsigned num_types, + cgs_irq_source_set_func_t set, + cgs_irq_handler_func_t handler, + void *private_data) +{ + CGS_FUNC_ADEV; + int ret = 0; + struct cgs_irq_params *irq_params; + struct amdgpu_irq_src *source = + kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); + if (!source) + return -ENOMEM; + irq_params = + kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL); + if (!irq_params) { + kfree(source); + return -ENOMEM; + } + source->num_types = num_types; + source->funcs = &cgs_irq_funcs; + irq_params->src_id = src_id; + irq_params->set = set; + irq_params->handler = handler; + irq_params->private_data = 
private_data; + source->data = (void *)irq_params; + ret = amdgpu_irq_add_id(adev, src_id, source); + if (ret) { + kfree(irq_params); + kfree(source); + } + + return ret; +} + +static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type) +{ + CGS_FUNC_ADEV; + return amdgpu_irq_get(adev, adev->irq.sources[src_id], type); +} + +static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type) +{ + CGS_FUNC_ADEV; + return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); +} + +int amdgpu_cgs_set_clockgating_state(void *cgs_device, + enum amd_ip_block_type block_type, + enum amd_clockgating_state state) +{ + CGS_FUNC_ADEV; + int i, r = -1; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_clockgating_state( + (void *)adev, + state); + break; + } + } + return r; +} + +int amdgpu_cgs_set_powergating_state(void *cgs_device, + enum amd_ip_block_type block_type, + enum amd_powergating_state state) +{ + CGS_FUNC_ADEV; + int i, r = -1; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_powergating_state( + (void *)adev, + state); + break; + } + } + return r; +} + + +static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) +{ + CGS_FUNC_ADEV; + enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case CGS_UCODE_ID_SDMA0: + result = AMDGPU_UCODE_ID_SDMA0; + break; + case CGS_UCODE_ID_SDMA1: + result = AMDGPU_UCODE_ID_SDMA1; + break; + case CGS_UCODE_ID_CP_CE: + result = AMDGPU_UCODE_ID_CP_CE; + break; + case CGS_UCODE_ID_CP_PFP: + result = AMDGPU_UCODE_ID_CP_PFP; + break; + case CGS_UCODE_ID_CP_ME: + result = AMDGPU_UCODE_ID_CP_ME; + break; + case CGS_UCODE_ID_CP_MEC: + case CGS_UCODE_ID_CP_MEC_JT1: + result = AMDGPU_UCODE_ID_CP_MEC1; + break; + case CGS_UCODE_ID_CP_MEC_JT2: + if (adev->asic_type == CHIP_TONGA) + result = AMDGPU_UCODE_ID_CP_MEC2; + else if (adev->asic_type == CHIP_CARRIZO) + result = AMDGPU_UCODE_ID_CP_MEC1; + break; + case CGS_UCODE_ID_RLC_G: + result = AMDGPU_UCODE_ID_RLC_G; + break; + default: + DRM_ERROR("Firmware type not supported\n"); + } + return result; +} + +static int amdgpu_cgs_get_firmware_info(void *cgs_device, + enum cgs_ucode_id type, + struct cgs_firmware_info *info) +{ + CGS_FUNC_ADEV; + + if (CGS_UCODE_ID_SMU != type) { + uint64_t gpu_addr; + uint32_t data_size; + const struct gfx_firmware_header_v1_0 *header; + enum AMDGPU_UCODE_ID id; + struct amdgpu_firmware_info *ucode; + + id = fw_type_convert(cgs_device, type); + ucode = &adev->firmware.ucode[id]; + if (ucode->fw == NULL) + return -EINVAL; + + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((type == CGS_UCODE_ID_CP_MEC_JT1) || + (type == CGS_UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + info->mc_addr = gpu_addr; + info->image_size = data_size; + info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); + } else { + char fw_name[30] = {0}; + int err = 0; + uint32_t ucode_size; + uint32_t ucode_start_address; + const uint8_t *src; + const struct smc_firmware_header_v1_0 *hdr; + + switch 
(adev->asic_type) { + case CHIP_TONGA: + strcpy(fw_name, "/*(DEBLOBBED)*/"); + break; + default: + DRM_ERROR("SMC firmware not supported\n"); + return -EINVAL; + } + + err = reject_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) { + DRM_ERROR("Failed to request firmware\n"); + return err; + } + + err = amdgpu_ucode_validate(adev->pm.fw); + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return err; + } + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + src = (const uint8_t *)(adev->pm.fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + info->version = adev->pm.fw_version; + info->image_size = ucode_size; + info->kptr = (void *)src; + } + return 0; +} + +static const struct cgs_ops amdgpu_cgs_ops = { + amdgpu_cgs_gpu_mem_info, + amdgpu_cgs_gmap_kmem, + amdgpu_cgs_gunmap_kmem, + amdgpu_cgs_alloc_gpu_mem, + amdgpu_cgs_free_gpu_mem, + amdgpu_cgs_gmap_gpu_mem, + amdgpu_cgs_gunmap_gpu_mem, + amdgpu_cgs_kmap_gpu_mem, + amdgpu_cgs_kunmap_gpu_mem, + amdgpu_cgs_read_register, + amdgpu_cgs_write_register, + amdgpu_cgs_read_ind_register, + amdgpu_cgs_write_ind_register, + amdgpu_cgs_read_pci_config_byte, + amdgpu_cgs_read_pci_config_word, + amdgpu_cgs_read_pci_config_dword, + amdgpu_cgs_write_pci_config_byte, + amdgpu_cgs_write_pci_config_word, + amdgpu_cgs_write_pci_config_dword, + amdgpu_cgs_atom_get_data_table, + amdgpu_cgs_atom_get_cmd_table_revs, + amdgpu_cgs_atom_exec_cmd_table, + amdgpu_cgs_create_pm_request, + amdgpu_cgs_destroy_pm_request, + amdgpu_cgs_set_pm_request, + amdgpu_cgs_pm_request_clock, + amdgpu_cgs_pm_request_engine, + amdgpu_cgs_pm_query_clock_limits, + amdgpu_cgs_set_camera_voltages, + amdgpu_cgs_get_firmware_info, + amdgpu_cgs_set_powergating_state, + amdgpu_cgs_set_clockgating_state +}; + +static const struct cgs_os_ops amdgpu_cgs_os_ops = { + amdgpu_cgs_add_irq_source, + amdgpu_cgs_irq_get, + amdgpu_cgs_irq_put +}; + +void *amdgpu_cgs_create_device(struct amdgpu_device *adev) +{ + struct amdgpu_cgs_device *cgs_device = + kmalloc(sizeof(*cgs_device), GFP_KERNEL); + + if (!cgs_device) { + DRM_ERROR("Couldn't allocate CGS device structure\n"); + return NULL; + } + + cgs_device->base.ops = &amdgpu_cgs_ops; + cgs_device->base.os_ops = &amdgpu_cgs_os_ops; + cgs_device->adev = adev; + + return cgs_device; +} + +void amdgpu_cgs_destroy_device(void *cgs_device) +{ + kfree(cgs_device); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1f040d85a..fd16652aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -126,46 +126,70 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } +struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, + struct drm_file *filp, + struct amdgpu_ctx *ctx, + struct amdgpu_ib *ibs, + uint32_t num_ibs) +{ + struct amdgpu_cs_parser *parser; + int i; + + parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); + if (!parser) + return NULL; + + parser->adev = adev; + parser->filp = filp; + parser->ctx = ctx; + parser->ibs = ibs; + parser->num_ibs = num_ibs; + for (i = 0; i < num_ibs; i++) + ibs[i].ctx = ctx; + + return parser; +} + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs 
*cs = data; uint64_t *chunk_array_user; - uint64_t *chunk_array = NULL; + uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; - int r = 0; + unsigned size; + int i; + int ret; - if (!cs->in.num_chunks) - goto out; + if (cs->in.num_chunks == 0) + return 0; + + chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); if (!p->ctx) { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_chunk; } + p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); /* get chunks */ INIT_LIST_HEAD(&p->validated); - chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); - if (chunk_array == NULL) { - r = -ENOMEM; - goto out; - } - - chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks); + chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { - r = -EFAULT; - goto out; + ret = -EFAULT; + goto put_bo_list; } p->nchunks = cs->in.num_chunks; - p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk), + p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), GFP_KERNEL); - if (p->chunks == NULL) { - r = -ENOMEM; - goto out; + if (!p->chunks) { + ret = -ENOMEM; + goto put_bo_list; } for (i = 0; i < p->nchunks; i++) { @@ -176,8 +200,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { - r = -EFAULT; - goto out; + ret = -EFAULT; + i--; + goto free_partial_kdata; } p->chunks[i].chunk_id = user_chunk.chunk_id; p->chunks[i].length_dw = user_chunk.length_dw; @@ -188,13 +213,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); if (p->chunks[i].kdata == NULL) { - r = -ENOMEM; - goto out; + ret = -ENOMEM; + i--; + goto free_partial_kdata; } size *= sizeof(uint32_t); if (copy_from_user(p->chunks[i].kdata, cdata, size)) { - r = -EFAULT; - goto out; + ret = -EFAULT; + goto free_partial_kdata; } switch (p->chunks[i].chunk_id) { @@ -214,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, handle); if (gobj == NULL) { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } p->uf.bo = gem_to_amdgpu_bo(gobj); p->uf.offset = fence_data->offset; } else { - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } break; @@ -230,20 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break; default: - r = -EINVAL; - goto out; + ret = -EINVAL; + goto free_partial_kdata; } } + p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); if (!p->ibs) { - r = -ENOMEM; - goto out; + ret = -ENOMEM; + goto free_all_kdata; } -out: kfree(chunk_array); - return r; + return 0; + +free_all_kdata: + i = p->nchunks - 1; +free_partial_kdata: + for (; i >= 0; i--) + drm_free_large(p->chunks[i].kdata); + kfree(p->chunks); +put_bo_list: + if (p->bo_list) + amdgpu_bo_list_put(p->bo_list); + amdgpu_ctx_put(p->ctx); +free_chunk: + kfree(chunk_array); + + return ret; } /* Returns how many bytes TTM can move per IB. 
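The error handling in the reworked amdgpu_cs_parser_init() above is a layered goto ladder: each failure site jumps to the label that releases exactly what had been acquired by that point, and the `i--` before jumping to free_partial_kdata makes the loop index name the last chunk whose kdata allocation actually succeeded, so the backward walk frees no more and no less. A minimal stand-alone illustration of that shape (plain user-space C with invented names, not the kernel function):

```c
#include <stdlib.h>

/* Allocate n buffers; on failure, free only the ones that were
 * actually obtained, in the style of the free_partial_kdata ladder:
 * decrement first so the index names the last successful slot. */
static int alloc_all(char **bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(64);
		if (!bufs[i]) {
			i--;			/* last index that succeeded */
			goto free_partial;
		}
	}
	return 0;

free_partial:
	for (; i >= 0; i--)
		free(bufs[i]);
	return -1;
}

int main(void)
{
	char *bufs[8];
	int i;

	if (alloc_all(bufs, 8))
		return 1;
	for (i = 0; i < 8; i++)
		free(bufs[i]);
	return 0;
}
```

The `free_all_kdata:` label falls through into the same loop by presetting the index to the final chunk, which is why the patch needs no second cleanup loop.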
@@ -298,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } -int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) +int amdgpu_cs_list_validate(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct list_head *validated) { - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_device *adev = p->adev; struct amdgpu_bo_list_entry *lobj; - struct list_head duplicates; struct amdgpu_bo *bo; u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); int r; - INIT_LIST_HEAD(&duplicates); - r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); - if (unlikely(r != 0)) { - return r; - } - - list_for_each_entry(lobj, &p->validated, tv.head) { + list_for_each_entry(lobj, validated, tv.head) { bo = lobj->robj; if (!bo->pin_count) { u32 domain = lobj->prefered_domains; @@ -331,7 +364,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) * into account. We don't want to disallow buffer moves * completely. */ - if (current_domain != AMDGPU_GEM_DOMAIN_CPU && + if ((lobj->allowed_domains & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -350,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) domain = lobj->allowed_domains; goto retry; } - ttm_eu_backoff_reservation(&p->ticket, &p->validated); return r; } } @@ -363,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_cs_buckets buckets; + struct list_head duplicates; bool need_mmap_lock = false; int i, r; @@ -382,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) if (need_mmap_lock) down_read(¤t->mm->mmap_sem); - r = amdgpu_cs_list_validate(p); + INIT_LIST_HEAD(&duplicates); + r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); + if (unlikely(r != 0)) + goto error_reserve; + + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated); + if (r) + goto error_validate; + + r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates); +error_validate: + if (r) + ttm_eu_backoff_reservation(&p->ticket, &p->validated); + +error_reserve: if (need_mmap_lock) up_read(¤t->mm->mmap_sem); @@ -415,18 +462,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; } -/** - * cs_parser_fini() - clean parser states - * @parser: parser structure holding parsing context. - * @error: error number - * - * If error is set than unvalidate buffer, otherwise just free memory - * used by parsing context. - **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) { - unsigned i; - if (!error) { /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. 
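amdgpu_cs_list_validate() above throttles buffer migration: a buffer is only forced into its preferred domain while the submission's byte budget lasts, and once `bytes_moved` exceeds the threshold, anything already resident in an allowed domain is validated in place. A reduced model of that decision (stand-alone C; the field names and the 4 MiB budget are made up, and the ENOMEM retry with allowed_domains is elided):

```c
#include <stdio.h>
#include <stdint.h>

#define MOVE_BUDGET (4ull << 20)	/* stand-in for the per-IB threshold */

struct buf {
	unsigned cur_domain;	/* where the buffer sits now (bitmask) */
	unsigned preferred;	/* where we would like it (bitmask)    */
	unsigned allowed;	/* every acceptable placement          */
	uint64_t size;
};

/* Decide the validation domain for one buffer, charging migrations
 * against a shared byte budget; once the budget is spent, buffers
 * already sitting in an allowed domain stay where they are. */
static unsigned pick_domain(const struct buf *b, uint64_t *moved)
{
	if ((b->allowed & b->cur_domain) &&	/* current placement legal */
	    !(b->preferred & b->cur_domain) &&	/* but a move is requested */
	    *moved > MOVE_BUDGET)		/* and the budget is gone  */
		return b->cur_domain;		/* don't move it */

	if (!(b->preferred & b->cur_domain))
		*moved += b->size;		/* this validation migrates */
	return b->preferred;
}

int main(void)
{
	struct buf b = { 0x1 /* GTT */, 0x2 /* VRAM */, 0x3, 8ull << 20 };
	uint64_t moved = 0;

	printf("first pass:  domain 0x%x\n", pick_domain(&b, &moved));
	printf("over budget: domain 0x%x\n", pick_domain(&b, &moved));
	return 0;
}
```

Note how the rewritten kernel condition checks `lobj->allowed_domains & current_domain` rather than merely "not in CPU domain": a buffer whose current placement is not acceptable must still move, budget or no budget.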
@@ -447,21 +484,45 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); } +} +static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) +{ + unsigned i; if (parser->ctx) amdgpu_ctx_put(parser->ctx); if (parser->bo_list) amdgpu_bo_list_put(parser->bo_list); + drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - if (parser->ibs) - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); - kfree(parser->ibs); - if (parser->uf.bo) - drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); + if (!amdgpu_enable_scheduler) + { + if (parser->ibs) + for (i = 0; i < parser->num_ibs; i++) + amdgpu_ib_free(parser->adev, &parser->ibs[i]); + kfree(parser->ibs); + if (parser->uf.bo) + drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); + } + + kfree(parser); +} + +/** + * cs_parser_fini() - clean parser states + * @parser: parser structure holding parsing context. + * @error: error number + * + * If error is set than unvalidate buffer, otherwise just free memory + * used by parsing context. + **/ +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +{ + amdgpu_cs_parser_fini_early(parser, error, backoff); + amdgpu_cs_parser_fini_late(parser); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, @@ -476,12 +537,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; + r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence); + if (r) + return r; + r = amdgpu_vm_clear_freed(adev, vm); if (r) return r; if (p->bo_list) { for (i = 0; i < p->bo_list->num_entries; i++) { + struct fence *f; + /* ignore duplicates */ bo = p->bo_list->array[i].robj; if (!bo) @@ -495,7 +562,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; - amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update); + f = bo_va->last_pt_update; + r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f); + if (r) + return r; } } @@ -529,9 +599,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, goto out; } amdgpu_cs_sync_rings(parser); - - r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, - parser->filp); + if (!amdgpu_enable_scheduler) + r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, + parser->filp); out: mutex_unlock(&vm->mutex); @@ -650,7 +720,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ib->oa_size = amdgpu_bo_size(oa); } } - /* wrap the last IB with user fence */ if (parser->uf.bo) { struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1]; @@ -693,9 +762,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, sizeof(struct drm_amdgpu_cs_chunk_dep); for (j = 0; j < num_deps; ++j) { - struct amdgpu_fence *fence; struct amdgpu_ring *ring; struct amdgpu_ctx *ctx; + struct fence *fence; r = amdgpu_cs_get_ring(adev, deps[j].ip_type, deps[j].ip_instance, @@ -707,85 +776,137 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, if (ctx == NULL) return -EINVAL; - r = amdgpu_fence_recreate(ring, p->filp, - deps[j].handle, - &fence); - if (r) { + fence = amdgpu_ctx_get_fence(ctx, ring, + deps[j].handle); + if (IS_ERR(fence)) { + r = PTR_ERR(fence); amdgpu_ctx_put(ctx); return r; - } - amdgpu_sync_fence(&ib->sync, fence); - amdgpu_fence_unref(&fence); - amdgpu_ctx_put(ctx); + } else if (fence) { + r = amdgpu_sync_fence(adev, 
&ib->sync, fence); + fence_put(fence); + amdgpu_ctx_put(ctx); + if (r) + return r; + } } } return 0; } +static int amdgpu_cs_free_job(struct amdgpu_job *job) +{ + int i; + if (job->ibs) + for (i = 0; i < job->num_ibs; i++) + amdgpu_ib_free(job->adev, &job->ibs[i]); + kfree(job->ibs); + if (job->uf.bo) + drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); + return 0; +} + int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; union drm_amdgpu_cs *cs = data; - struct amdgpu_cs_parser parser; - int r, i; + struct amdgpu_cs_parser *parser; bool reserved_buffers = false; + int i, r; down_read(&adev->exclusive_lock); if (!adev->accel_working) { up_read(&adev->exclusive_lock); return -EBUSY; } - /* initialize parser */ - memset(&parser, 0, sizeof(struct amdgpu_cs_parser)); - parser.filp = filp; - parser.adev = adev; - r = amdgpu_cs_parser_init(&parser, data); + + parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); + if (!parser) + return -ENOMEM; + r = amdgpu_cs_parser_init(parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - amdgpu_cs_parser_fini(&parser, r, false); + kfree(parser); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; } - r = amdgpu_cs_parser_relocs(&parser); - if (r) { - if (r != -ERESTARTSYS) { - if (r == -ENOMEM) - DRM_ERROR("Not enough memory for command submission!\n"); - else - DRM_ERROR("Failed to process the buffer list %d!\n", r); - } + r = amdgpu_cs_parser_relocs(parser); + if (r == -ENOMEM) + DRM_ERROR("Not enough memory for command submission!\n"); + else if (r && r != -ERESTARTSYS) + DRM_ERROR("Failed to process the buffer list %d!\n", r); + else if (!r) { + reserved_buffers = true; + r = amdgpu_cs_ib_fill(adev, parser); } if (!r) { - reserved_buffers = true; - r = amdgpu_cs_ib_fill(adev, &parser); + r = amdgpu_cs_dependencies(adev, parser); + if (r) + DRM_ERROR("Failed in the dependencies handling %d!\n", r); } - if (!r) - r = amdgpu_cs_dependencies(adev, &parser); - - if (r) { - amdgpu_cs_parser_fini(&parser, r, reserved_buffers); - up_read(&adev->exclusive_lock); - r = amdgpu_cs_handle_lockup(adev, r); - return r; - } + if (r) + goto out; - for (i = 0; i < parser.num_ibs; i++) - trace_amdgpu_cs(&parser, i); + for (i = 0; i < parser->num_ibs; i++) + trace_amdgpu_cs(parser, i); - r = amdgpu_cs_ib_vm_chunk(adev, &parser); - if (r) { + r = amdgpu_cs_ib_vm_chunk(adev, parser); + if (r) goto out; + + if (amdgpu_enable_scheduler && parser->num_ibs) { + struct amdgpu_job *job; + struct amdgpu_ring * ring = parser->ibs->ring; + job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); + if (!job) + return -ENOMEM; + job->base.sched = &ring->sched; + job->base.s_entity = &parser->ctx->rings[ring->idx].entity; + job->adev = parser->adev; + job->ibs = parser->ibs; + job->num_ibs = parser->num_ibs; + job->base.owner = parser->filp; + mutex_init(&job->job_lock); + if (job->ibs[job->num_ibs - 1].user) { + memcpy(&job->uf, &parser->uf, + sizeof(struct amdgpu_user_fence)); + job->ibs[job->num_ibs - 1].user = &job->uf; + } + + job->free_job = amdgpu_cs_free_job; + mutex_lock(&job->job_lock); + r = amd_sched_entity_push_job(&job->base); + if (r) { + mutex_unlock(&job->job_lock); + amdgpu_cs_free_job(job); + kfree(job); + goto out; + } + cs->out.handle = + amdgpu_ctx_add_fence(parser->ctx, ring, + &job->base.s_fence->base); + parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; + + list_sort(NULL, &parser->validated, cmp_size_smaller_first); + 
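In the scheduler path of amdgpu_cs_ioctl() above, the parser's IB array changes owner: `job->ibs` takes over `parser->ibs`, and amdgpu_cs_free_job() rather than parser teardown releases it; the patch expresses this by gating the parser-side free in amdgpu_cs_parser_fini_late() on amdgpu_enable_scheduler. A stand-alone sketch of the same single-owner rule, using the more common NULL-on-transfer idiom instead of a global flag (illustrative C, not the kernel code):

```c
#include <stdlib.h>

struct job {
	int *ibs;
	int num_ibs;
};

/* Hand the parser's IB array over to the job: after this point the
 * job's free callback, not the parser teardown, releases it. */
static void job_adopt_ibs(struct job *job, int **parser_ibs, int num)
{
	job->ibs = *parser_ibs;
	job->num_ibs = num;
	*parser_ibs = NULL;	/* parser no longer owns the array */
}

static void job_free(struct job *job)
{
	free(job->ibs);
	job->ibs = NULL;
}

int main(void)
{
	int *parser_ibs = calloc(4, sizeof(*parser_ibs));
	struct job job;

	if (!parser_ibs)
		return 1;
	job_adopt_ibs(&job, &parser_ibs, 4);
	free(parser_ibs);	/* no-op: ownership already moved */
	job_free(&job);
	return 0;
}
```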
ttm_eu_fence_buffer_objects(&parser->ticket, + &parser->validated, + &job->base.s_fence->base); + + mutex_unlock(&job->job_lock); + amdgpu_cs_parser_fini_late(parser); + up_read(&adev->exclusive_lock); + return 0; } - cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq; + cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; out: - amdgpu_cs_parser_fini(&parser, r, true); + amdgpu_cs_parser_fini(parser, r, reserved_buffers); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; @@ -806,30 +927,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, union drm_amdgpu_wait_cs *wait = data; struct amdgpu_device *adev = dev->dev_private; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); - struct amdgpu_fence *fence = NULL; struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; + struct fence *fence; long r; - ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); - if (ctx == NULL) - return -EINVAL; - r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, wait->in.ring, &ring); - if (r) { - amdgpu_ctx_put(ctx); + if (r) return r; - } - r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); - if (r) { - amdgpu_ctx_put(ctx); - return r; - } + ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); + if (ctx == NULL) + return -EINVAL; + + fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); + if (IS_ERR(fence)) + r = PTR_ERR(fence); + else if (fence) { + r = fence_wait_timeout(fence, true, timeout); + fence_put(fence); + } else + r = 1; - r = fence_wait_timeout(&fence->base, true, timeout); - amdgpu_fence_unref(&fence); amdgpu_ctx_put(ctx); if (r < 0) return r; @@ -864,7 +984,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, if (!reloc->bo_va) continue; - list_for_each_entry(mapping, &reloc->bo_va->mappings, list) { + list_for_each_entry(mapping, &reloc->bo_va->valids, list) { + if (mapping->it.start > addr || + addr > mapping->it.last) + continue; + + *bo = reloc->bo_va->bo; + return mapping; + } + + list_for_each_entry(mapping, &reloc->bo_va->invalids, list) { if (mapping->it.start > addr || addr > mapping->it.last) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6c66ac8a1..e0b80ccdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -25,54 +25,107 @@ #include #include "amdgpu.h" -static void amdgpu_ctx_do_release(struct kref *ref) +int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, + struct amdgpu_ctx *ctx) { - struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr; + unsigned i, j; + int r; - ctx = container_of(ref, struct amdgpu_ctx, refcount); - mgr = &ctx->fpriv->ctx_mgr; + memset(ctx, 0, sizeof(*ctx)); + ctx->adev = adev; + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + ctx->rings[i].sequence = 1; - idr_remove(&mgr->ctx_handles, ctx->id); - kfree(ctx); + if (amdgpu_enable_scheduler) { + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amd_sched_rq *rq; + if (kernel) + rq = &adev->rings[i]->sched.kernel_rq; + else + rq = &adev->rings[i]->sched.sched_rq; + r = amd_sched_entity_init(&adev->rings[i]->sched, + &ctx->rings[i].entity, + rq, amdgpu_sched_jobs); + if (r) + break; + } + + if (i < adev->num_rings) { + for (j = 0; j < i; j++) + amd_sched_entity_fini(&adev->rings[j]->sched, + &ctx->rings[j].entity); + kfree(ctx); + return r; + } + } + return 0; } -int 
amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags) +void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) +{ + struct amdgpu_device *adev = ctx->adev; + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j) + fence_put(ctx->rings[i].fences[j]); + + if (amdgpu_enable_scheduler) { + for (i = 0; i < adev->num_rings; i++) + amd_sched_entity_fini(&adev->rings[i]->sched, + &ctx->rings[i].entity); + } +} + +static int amdgpu_ctx_alloc(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + uint32_t *id) { - int r; - struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx *ctx; + int r; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mutex_lock(&mgr->lock); - r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL); + r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); if (r < 0) { mutex_unlock(&mgr->lock); kfree(ctx); return r; } *id = (uint32_t)r; - - memset(ctx, 0, sizeof(*ctx)); - ctx->id = *id; - ctx->fpriv = fpriv; - kref_init(&ctx->refcount); + r = amdgpu_ctx_init(adev, false, ctx); mutex_unlock(&mgr->lock); - return 0; + return r; } -int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id) +static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; + + ctx = container_of(ref, struct amdgpu_ctx, refcount); + + amdgpu_ctx_fini(ctx); + + kfree(ctx); +} + +static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) +{ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx *ctx; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); if (ctx) { + idr_remove(&mgr->ctx_handles, id); kref_put(&ctx->refcount, amdgpu_ctx_do_release); mutex_unlock(&mgr->lock); return 0; @@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, union drm_amdgpu_ctx_out *out) { struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx_mgr *mgr; unsigned reset_counter; + if (!fpriv) + return -EINVAL; + + mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); if (!ctx) { @@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, } /* TODO: these two are always zero */ - out->state.flags = ctx->state.flags; - out->state.hangs = ctx->state.hangs; + out->state.flags = 0x0; + out->state.hangs = 0x0; /* determine if a GPU reset has occured since the last call */ reset_counter = atomic_read(&adev->gpu_reset_counter); @@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, return 0; } -void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) -{ - struct idr *idp; - struct amdgpu_ctx *ctx; - uint32_t id; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; - idp = &mgr->ctx_handles; - - idr_for_each_entry(idp,ctx,id) { - if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) - DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id); - } - - mutex_destroy(&mgr->lock); -} - int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { int r; uint32_t id; - uint32_t flags; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = dev->dev_private; @@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, r = 0; id = args->in.ctx_id; - flags = args->in.flags; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: - r = amdgpu_ctx_alloc(adev, fpriv, &id, flags); + r = amdgpu_ctx_alloc(adev, fpriv, &id); args->out.alloc.ctx_id = id; break; case 
AMDGPU_CTX_OP_FREE_CTX: - r = amdgpu_ctx_free(adev, fpriv, id); + r = amdgpu_ctx_free(fpriv, id); break; case AMDGPU_CTX_OP_QUERY_STATE: r = amdgpu_ctx_query(adev, fpriv, id, &args->out); @@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) { struct amdgpu_ctx *ctx; - struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + struct amdgpu_ctx_mgr *mgr; + + if (!fpriv) + return NULL; + + mgr = &fpriv->ctx_mgr; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); @@ -177,17 +221,86 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) int amdgpu_ctx_put(struct amdgpu_ctx *ctx) { - struct amdgpu_fpriv *fpriv; - struct amdgpu_ctx_mgr *mgr; - if (ctx == NULL) return -EINVAL; - fpriv = ctx->fpriv; - mgr = &fpriv->ctx_mgr; - mutex_lock(&mgr->lock); kref_put(&ctx->refcount, amdgpu_ctx_do_release); - mutex_unlock(&mgr->lock); - return 0; } + +uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct fence *fence) +{ + struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + uint64_t seq = cring->sequence; + unsigned idx = 0; + struct fence *other = NULL; + + idx = seq % AMDGPU_CTX_MAX_CS_PENDING; + other = cring->fences[idx]; + if (other) { + signed long r; + r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + } + + fence_get(fence); + + spin_lock(&ctx->ring_lock); + cring->fences[idx] = fence; + cring->sequence++; + spin_unlock(&ctx->ring_lock); + + fence_put(other); + + return seq; +} + +struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq) +{ + struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct fence *fence; + + spin_lock(&ctx->ring_lock); + + if (seq >= cring->sequence) { + spin_unlock(&ctx->ring_lock); + return ERR_PTR(-EINVAL); + } + + + if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) { + spin_unlock(&ctx->ring_lock); + return NULL; + } + + fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]); + spin_unlock(&ctx->ring_lock); + + return fence; +} + +void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) +{ + mutex_init(&mgr->lock); + idr_init(&mgr->ctx_handles); +} + +void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) +{ + struct amdgpu_ctx *ctx; + struct idr *idp; + uint32_t id; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) { + if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) + DRM_ERROR("ctx %p is still alive\n", ctx); + } + + idr_destroy(&mgr->ctx_handles); + mutex_destroy(&mgr->lock); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 99f158e1b..6068d8207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = { "MULLINS", "TOPAZ", "TONGA", + "FIJI", "CARRIZO", "LAST", }; @@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; - if (adev->flags & AMDGPU_IS_PX) + if (adev->flags & AMD_IS_PX) return true; return false; } @@ -243,8 +244,9 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) if (adev->vram_scratch.robj == NULL) { r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, &adev->vram_scratch.robj); + PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, + 
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &adev->vram_scratch.robj); if (r) { return r; } @@ -447,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) if (adev->wb.wb_obj == NULL) { r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + &adev->wb.wb_obj); if (r) { dev_warn(adev->dev, "(%d) create WB bo failed\n", r); return r; @@ -1160,6 +1163,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: + case CHIP_FIJI: case CHIP_CARRIZO: if (adev->asic_type == CHIP_CARRIZO) adev->family = AMDGPU_FAMILY_CZ; @@ -1377,7 +1381,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMDGPU_ASIC_MASK; + adev->asic_type = flags & AMD_ASIC_MASK; adev->is_atom_bios = false; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; adev->mc.gtt_size = 512 * 1024 * 1024; @@ -1523,6 +1527,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, return r; } + r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx); + if (r) { + dev_err(adev->dev, "failed to create kernel context (%d).\n", r); + return r; + } r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); @@ -1584,6 +1593,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->shutdown = true; /* evict vram memory */ amdgpu_bo_evict_vram(adev); + amdgpu_ctx_fini(&adev->kernel_ctx); amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); @@ -1627,8 +1637,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) struct amdgpu_device *adev; struct drm_crtc *crtc; struct drm_connector *connector; - int i, r; - bool force_completion = false; + int r; if (dev == NULL || dev->dev_private == NULL) { return -ENODEV; @@ -1642,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) drm_kms_helper_poll_disable(dev); /* turn off display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); } + drm_modeset_unlock_all(dev); /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -1667,21 +1678,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) /* evict vram memory */ amdgpu_bo_evict_vram(adev); - /* wait for gpu to finish processing current batch */ - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (!ring) - continue; - - r = amdgpu_fence_wait_empty(ring); - if (r) { - /* delay GPU reset to resume */ - force_completion = true; - } - } - if (force_completion) { - amdgpu_fence_driver_force_completion(adev); - } + amdgpu_fence_driver_suspend(adev); r = amdgpu_suspend(adev); @@ -1739,6 +1736,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) r = amdgpu_resume(adev); + amdgpu_fence_driver_resume(adev); + r = amdgpu_ib_ring_tests(adev); if (r) DRM_ERROR("ib ring test failed (%d).\n", r); @@ -1751,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (fbcon) { drm_helper_resume_force_mode(dev); /* turn on display hw */ + drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + drm_modeset_unlock_all(dev); } 
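The new amdgpu_ctx_add_fence()/amdgpu_ctx_get_fence() pair earlier in this patch keeps a fixed window of AMDGPU_CTX_MAX_CS_PENDING fences per ring, indexed by sequence number modulo the window size: a handle at or above the write cursor is invalid, one that has aged out of the window is known to be long signalled, and add_fence first waits out the slot it is about to overwrite. A toy model of the window arithmetic (user-space C; the window size and integer fence type are stand-ins, and the wait on the old slot is elided):

```c
#include <stdio.h>
#include <stdint.h>

#define PENDING 4	/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

struct ring_fences {
	uint64_t seq;		/* next sequence number to hand out */
	int fence[PENDING];	/* last PENDING fences, seq % PENDING */
};

static uint64_t add_fence(struct ring_fences *r, int fence)
{
	uint64_t seq = r->seq++;

	/* the kernel code first waits on the fence being overwritten */
	r->fence[seq % PENDING] = fence;
	return seq;
}

/* -1: never submitted; 0: aged out of the window (already signalled);
 *  1: still tracked, *out holds the fence. */
static int get_fence(const struct ring_fences *r, uint64_t seq, int *out)
{
	if (seq >= r->seq)
		return -1;
	if (seq + PENDING < r->seq)
		return 0;
	*out = r->fence[seq % PENDING];
	return 1;
}

int main(void)
{
	struct ring_fences r = { 1, { 0 } };	/* sequence starts at 1 */
	int f = 0;
	uint64_t h = add_fence(&r, 100);
	int found = get_fence(&r, h, &f);

	printf("handle=%llu found=%d fence=%d\n",
	       (unsigned long long)h, found, f);
	return 0;
}
```

Starting the per-ring sequence at 1 matches the ctx code above and keeps 0 usable as a never-valid handle.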
drm_kms_helper_poll_enable(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 4c4035fde..6c9e0902a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,6 +35,36 @@ #include #include +static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, + struct fence **f) +{ + struct amdgpu_fence *fence; + long r; + + if (*f == NULL) + return; + + fence = to_amdgpu_fence(*f); + if (fence) { + r = fence_wait(&fence->base, false); + if (r == -EDEADLK) { + up_read(&adev->exclusive_lock); + r = amdgpu_gpu_reset(adev); + down_read(&adev->exclusive_lock); + } + } else + r = fence_wait(*f, false); + + if (r) + DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); + + /* We continue with the page flip even if we failed to wait on + * the fence, otherwise the DRM core and userspace will be + * confused about which BO the CRTC is scanning out + */ + fence_put(*f); + *f = NULL; +} static void amdgpu_flip_work_func(struct work_struct *__work) { @@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work) struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpuCrtc->base; - struct amdgpu_fence *fence; unsigned long flags; - int r; + unsigned i; down_read(&adev->exclusive_lock); - if (work->fence) { - fence = to_amdgpu_fence(work->fence); - if (fence) { - r = amdgpu_fence_wait(fence, false); - if (r == -EDEADLK) { - up_read(&adev->exclusive_lock); - r = amdgpu_gpu_reset(adev); - down_read(&adev->exclusive_lock); - } - } else - r = fence_wait(work->fence, false); - - if (r) - DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); - - /* We continue with the page flip even if we failed to wait on - * the fence, otherwise the DRM core and userspace will be - * confused about which BO the CRTC is scanning out - */ - - fence_put(work->fence); - work->fence = NULL; - } + amdgpu_flip_wait_fence(adev, &work->excl); + for (i = 0; i < work->shared_count; ++i) + amdgpu_flip_wait_fence(adev, &work->shared[i]); /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -106,6 +115,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) DRM_ERROR("failed to reserve buffer after flip\n"); drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); + kfree(work->shared); kfree(work); } @@ -125,7 +135,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, unsigned long flags; u64 tiling_flags; u64 base; - int r; + int i, r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) @@ -165,7 +175,15 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, goto cleanup; } - work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); + r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, + &work->shared_count, + &work->shared); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(new_rbo); + DRM_ERROR("failed to get fences for buffer\n"); + goto cleanup; + } + amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); amdgpu_bo_unreserve(new_rbo); @@ -210,7 +228,10 @@ pflip_cleanup: cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - fence_put(work->fence); + fence_put(work->excl); + for (i = 0; i < work->shared_count; ++i) + fence_put(work->shared[i]); + kfree(work->shared); kfree(work); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 56da96223..b190c2a83 100644 
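The page-flip rework above replaces the single exclusive fence with the full set returned by reservation_object_get_fences_rcu(): the flip worker must drain the exclusive fence and every shared fence before the new buffer may be scanned out, otherwise a buffer still being read or written could reach the display mid-operation. Schematically (stand-alone C with integer stand-ins for struct fence pointers):

```c
#include <stdio.h>

struct flip_work {
	int excl;		/* exclusive (write) fence, 0 = none */
	unsigned shared_count;
	int shared[4];		/* shared (read) fences */
};

/* Wait on one fence and drop our reference; resetting to 0 mirrors
 * the fence_put() + NULL in the patch. */
static void wait_fence(int *fence)
{
	if (*fence == 0)
		return;
	printf("waiting on fence %d\n", *fence);
	*fence = 0;
}

/* The flip worker now drains the exclusive fence and every shared
 * fence before reprogramming the scanout address. */
static void flip_wait_all(struct flip_work *work)
{
	unsigned i;

	wait_fence(&work->excl);
	for (i = 0; i < work->shared_count; i++)
		wait_fence(&work->shared[i]);
}

int main(void)
{
	struct flip_work work = { 7, 2, { 8, 9, 0, 0 } };

	flip_wait_all(&work);
	return 0;
}
```

This is also why the unpin path and the error path both gain a `kfree(work->shared)`: the shared array is a heap allocation made by the RCU snapshot, not a single embedded pointer.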
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -44,12 +44,15 @@ #include "amdgpu.h" #include "amdgpu_irq.h" +#include "amdgpu_amdkfd.h" + /* * KMS wrapper. * - 3.0.0 - initial driver + * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 0 +#define KMS_DRIVER_MINOR 1 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -61,7 +64,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 10000; +int amdgpu_lockup_timeout = 0; int amdgpu_dpm = -1; int amdgpu_smc_load_fw = 1; int amdgpu_aspm = -1; @@ -73,6 +76,10 @@ int amdgpu_deep_color = 0; int amdgpu_vm_size = 8; int amdgpu_vm_block_size = -1; int amdgpu_exp_hw_support = 0; +int amdgpu_enable_scheduler = 0; +int amdgpu_sched_jobs = 16; +int amdgpu_sched_hw_submission = 2; +int amdgpu_enable_semaphores = 1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -101,7 +108,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(msi, amdgpu_msi, int, 0444); -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)"); +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)"); module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); @@ -137,36 +144,48 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); +MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))"); +module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); + +MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)"); +module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); + +MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); +module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); + +MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)"); +module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644); + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ - {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, - {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU}, + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, /* Bonaire */ - {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, - {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY}, + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY}, {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, {0x1002, 0x6650, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE}, @@ -188,46 +207,46 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII}, /* Kabini */ - {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, - {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU}, + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU}, /* mullins */ - {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - 
{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, - {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU}, + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, @@ -238,12 +257,14 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, + /* fiji */ + 
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, /* carrizo */ - {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, - {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU}, + {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, + {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, {0, 0, 0} }; @@ -279,7 +300,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, unsigned long flags = ent->driver_data; int ret; - if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { + if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { DRM_INFO("This hardware requires experimental hardware support.\n" "See modparam exp_hw_support\n"); return -ENODEV; @@ -527,12 +548,15 @@ static int __init amdgpu_init(void) driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); + amdgpu_amdkfd_init(); + /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); } static void __exit amdgpu_exit(void) { + amdgpu_amdkfd_fini(); drm_pci_exit(driver, pdriver); amdgpu_unregister_atpx_handler(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h index cceeb33c4..e3a4f7048 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h @@ -31,7 +31,7 @@ #include #include -#include "amdgpu_family.h" +#include "amd_shared.h" /* General customization: */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h deleted file mode 100644 index 069876435..000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ - -/* this file defines the CHIP_ and family flags used in the pciids, - * its is common between kms and non-kms because duplicating it and - * changing one place is fail. - */ -#ifndef AMDGPU_FAMILY_H -#define AMDGPU_FAMILY_H -/* - * Supported ASIC types - */ -enum amdgpu_asic_type { - CHIP_BONAIRE = 0, - CHIP_KAVERI, - CHIP_KABINI, - CHIP_HAWAII, - CHIP_MULLINS, - CHIP_TOPAZ, - CHIP_TONGA, - CHIP_CARRIZO, - CHIP_LAST, -}; - -/* - * Chip flags - */ -enum amdgpu_chip_flags { - AMDGPU_ASIC_MASK = 0x0000ffffUL, - AMDGPU_FLAGS_MASK = 0xffff0000UL, - AMDGPU_IS_MOBILITY = 0x00010000UL, - AMDGPU_IS_APU = 0x00020000UL, - AMDGPU_IS_PX = 0x00040000UL, - AMDGPU_EXP_HW_SUPPORT = 0x00080000UL, -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index c1645d21f..96290d9cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -126,8 +126,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, AMDGPU_GEM_DOMAIN_VRAM, - 0, true, - &gobj); + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + true, &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d)\n", aligned_size); @@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct amdgpu_bo *rbo = NULL; - struct device *device = &adev->pdev->dev; int ret; unsigned long tmp; @@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper, rbo = gem_to_amdgpu_bo(gobj); /* okay we have an object now allocate the framebuffer */ - info = framebuffer_alloc(0, device); - if (info == NULL) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto out_unref; } @@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper, ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out_unref; + goto out_destroy_fbi; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; - rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); @@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_unref; - } info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = adev->mc.aper_size; @@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, if (info->screen_base == NULL) { ret = -ENOSPC; - goto out_unref; - } - - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out_unref; + 
goto out_destroy_fbi; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); @@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, vga_switcheroo_client_fb_set(adev->ddev->pdev, info); return 0; +out_destroy_fbi: + drm_fb_helper_release_fbi(helper); out_unref: if (rbo) { @@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev) static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) { - struct fb_info *info; struct amdgpu_framebuffer *rfb = &rfbdev->rfb; - if (rfbdev->helper.fbdev) { - info = rfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&rfbdev->helper); + drm_fb_helper_release_fbi(&rfbdev->helper); if (rfb->obj) { amdgpufb_destroy_pinned_object(rfb->obj); @@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev) void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) { if (adev->mode_info.rfbdev) - fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state); + drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper, + state); } int amdgpu_fbdev_total_size(struct amdgpu_device *adev) @@ -419,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) return true; return false; } + +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) +{ + struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!afbdev) + return; + + fb_helper = &afbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index a7189a1fa..b3fc26c59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, (*fence)->ring = ring; (*fence)->owner = owner; fence_init(&(*fence)->base, &amdgpu_fence_ops, - &adev->fence_queue.lock, adev->fence_context + ring->idx, + &ring->fence_drv.fence_queue.lock, + adev->fence_context + ring->idx, (*fence)->seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, @@ -135,38 +136,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, return 0; } -/** - * amdgpu_fence_recreate - recreate a fence from an user fence - * - * @ring: ring the fence is associated with - * @owner: creator of the fence - * @seq: user fence sequence number - * @fence: resulting amdgpu fence object - * - * Recreates a fence command from the user fence sequence number (all asics). - * Returns 0 on success, -ENOMEM on failure. 
- */ -int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, - uint64_t seq, struct amdgpu_fence **fence) -{ - struct amdgpu_device *adev = ring->adev; - - if (seq > ring->fence_drv.sync_seq[ring->idx]) - return -EINVAL; - - *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); - if ((*fence) == NULL) - return -ENOMEM; - - (*fence)->seq = seq; - (*fence)->ring = ring; - (*fence)->owner = owner; - fence_init(&(*fence)->base, &amdgpu_fence_ops, - &adev->fence_queue.lock, adev->fence_context + ring->idx, - (*fence)->seq); - return 0; -} - /** * amdgpu_fence_check_signaled - callback from fence_queue * @@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl else FENCE_TRACE(&fence->base, "was already signaled\n"); - amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src, - fence->ring->fence_drv.irq_type); - __remove_wait_queue(&adev->fence_queue, &fence->fence_wake); + __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake); fence_put(&fence->base); } else FENCE_TRACE(&fence->base, "pending\n"); @@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) return; } - if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) { - fence_drv->delayed_irq = false; - amdgpu_irq_update(ring->adev, fence_drv->irq_src, - fence_drv->irq_type); + if (amdgpu_fence_activity(ring)) { + wake_up_all(&ring->fence_drv.fence_queue); } - - if (amdgpu_fence_activity(ring)) - wake_up_all(&ring->adev->fence_queue); else if (amdgpu_ring_is_lockup(ring)) { /* good news we believe it's a lockup */ dev_warn(ring->adev->dev, "GPU lockup (current fence id " @@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) /* remember that we need an reset */ ring->adev->needs_reset = true; - wake_up_all(&ring->adev->fence_queue); + wake_up_all(&ring->fence_drv.fence_queue); } up_read(&ring->adev->exclusive_lock); } @@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work) */ void amdgpu_fence_process(struct amdgpu_ring *ring) { - uint64_t seq, last_seq, last_emitted; - unsigned count_loop = 0; - bool wake = false; - - /* Note there is a scenario here for an infinite loop but it's - * very unlikely to happen. For it to happen, the current polling - * process need to be interrupted by another process and another - * process needs to update the last_seq btw the atomic read and - * xchg of the current process. - * - * More over for this to go in infinite loop there need to be - * continuously new fence signaled ie amdgpu_fence_read needs - * to return a different value each time for both the currently - * polling process and the other process that xchg the last_seq - * btw atomic read and xchg of the current process. And the - * value the other process set as last seq must be higher than - * the seq value we just read. Which means that current process - * need to be interrupted after amdgpu_fence_read and before - * atomic xchg. - * - * To be even more safe we count the number of time we loop and - * we bail after 10 loop just accepting the fact that we might - * have temporarly set the last_seq not to the true real last - * seq but to an older one. 
- */ - last_seq = atomic64_read(&ring->fence_drv.last_seq); - do { - last_emitted = ring->fence_drv.sync_seq[ring->idx]; - seq = amdgpu_fence_read(ring); - seq |= last_seq & 0xffffffff00000000LL; - if (seq < last_seq) { - seq &= 0xffffffff; - seq |= last_emitted & 0xffffffff00000000LL; - } - - if (seq <= last_seq || seq > last_emitted) { - break; - } - /* If we loop over we don't want to return without - * checking if a fence is signaled as it means that the - * seq we just read is different from the previous on. - */ - wake = true; - last_seq = seq; - if ((count_loop++) > 10) { - /* We looped over too many time leave with the - * fact that we might have set an older fence - * seq then the current real last seq as signaled - * by the hw. - */ - break; - } - } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); - - if (wake) - wake_up_all(&ring->adev->fence_queue); + if (amdgpu_fence_activity(ring)) + wake_up_all(&ring->fence_drv.fence_queue); } /** @@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) { struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_ring *ring = fence->ring; - struct amdgpu_device *adev = ring->adev; if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) return false; - if (down_read_trylock(&adev->exclusive_lock)) { - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - if (amdgpu_fence_activity(ring)) - wake_up_all_locked(&adev->fence_queue); - - /* did fence get signaled after we enabled the sw irq? */ - if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) { - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - up_read(&adev->exclusive_lock); - return false; - } - - up_read(&adev->exclusive_lock); - } else { - /* we're probably in a lockup, lets not fiddle too much */ - if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type)) - ring->fence_drv.delayed_irq = true; - amdgpu_fence_schedule_check(ring); - } - fence->fence_wake.flags = 0; fence->fence_wake.private = NULL; fence->fence_wake.func = amdgpu_fence_check_signaled; - __add_wait_queue(&adev->fence_queue, &fence->fence_wake); + __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); fence_get(f); FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); return true; } -/** - * amdgpu_fence_signaled - check if a fence has signaled - * - * @fence: amdgpu fence object - * - * Check if the requested fence has signaled (all asics). - * Returns true if the fence has signaled or false if it has not. - */ -bool amdgpu_fence_signaled(struct amdgpu_fence *fence) -{ - if (!fence) - return true; - - if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) { - if (!fence_signal(&fence->base)) - FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n"); - return true; - } - - return false; -} - -/** - * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled - * - * @adev: amdgpu device pointer - * @seq: sequence numbers - * - * Check if the last signaled fence sequnce number is >= the requested - * sequence number (all asics). - * Returns true if any has signaled (current value is >= requested value) - * or false if it has not. Helper function for amdgpu_fence_wait_seq. 
- */ -static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq) -{ - unsigned i; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (!adev->rings[i] || !seq[i]) - continue; - - if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i])) - return true; - } - - return false; -} - -/** - * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers - * - * @adev: amdgpu device pointer - * @target_seq: sequence number(s) we want to wait for - * @intr: use interruptable sleep - * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait - * - * Wait for the requested sequence number(s) to be written by any ring - * (all asics). Sequnce number array is indexed by ring id. - * @intr selects whether to use interruptable (true) or non-interruptable - * (false) sleep when waiting for the sequence number. Helper function - * for amdgpu_fence_wait_*(). - * Returns remaining time if the sequence number has passed, 0 when - * the wait timeout, or an error for all other cases. - * -EDEADLK is returned when a GPU lockup has been detected. - */ -static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, - u64 *target_seq, bool intr, - long timeout) -{ - uint64_t last_seq[AMDGPU_MAX_RINGS]; - bool signaled; - int i; - long r; - - if (timeout == 0) { - return amdgpu_fence_any_seq_signaled(adev, target_seq); - } - - while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) { - - /* Save current sequence values, used to check for GPU lockups */ - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - last_seq[i] = atomic64_read(&ring->fence_drv.last_seq); - trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]); - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - } - - if (intr) { - r = wait_event_interruptible_timeout(adev->fence_queue, ( - (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) - || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); - } else { - r = wait_event_timeout(adev->fence_queue, ( - (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq)) - || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT); - } - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]); - } - - if (unlikely(r < 0)) - return r; - - if (unlikely(!signaled)) { - - if (adev->needs_reset) - return -EDEADLK; - - /* we were interrupted for some reason and fence - * isn't signaled yet, resume waiting */ - if (r) - continue; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !target_seq[i]) - continue; - - if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq)) - break; - } - - if (i != AMDGPU_MAX_RINGS) - continue; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (!adev->rings[i] || !target_seq[i]) - continue; - - if (amdgpu_ring_is_lockup(adev->rings[i])) - break; - } - - if (i < AMDGPU_MAX_RINGS) { - /* good news we believe it's a lockup */ - dev_warn(adev->dev, "GPU lockup (waiting for " - "0x%016llx last fence id 0x%016llx on" - " ring %d)\n", - target_seq[i], last_seq[i], i); - - /* remember that we need an reset */ - adev->needs_reset = true; - wake_up_all(&adev->fence_queue); - return -EDEADLK; - } - - if (timeout < MAX_SCHEDULE_TIMEOUT) { - timeout -= 
AMDGPU_FENCE_JIFFIES_TIMEOUT; - if (timeout <= 0) { - return 0; - } - } - } - return timeout; -} - -/** - * amdgpu_fence_wait - wait for a fence to signal - * - * @fence: amdgpu fence object - * @intr: use interruptable sleep - * - * Wait for the requested fence to signal (all asics). - * @intr selects whether to use interruptable (true) or non-interruptable - * (false) sleep when waiting for the fence. - * Returns 0 if the fence has passed, error for all other cases. - */ -int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr) -{ - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; - - seq[fence->ring->idx] = fence->seq; - r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - return r; - } - - r = fence_signal(&fence->base); - if (!r) - FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); - return 0; -} - -/** - * amdgpu_fence_wait_any - wait for a fence to signal on any ring - * - * @adev: amdgpu device pointer - * @fences: amdgpu fence object(s) - * @intr: use interruptable sleep +/* + * amdgpu_fence_ring_wait_seq - wait for a specific sequence number on a ring to signal + * @ring: ring to wait on for the seq number + * @seq: sequence number to wait for * - * Wait for any requested fence to signal (all asics). Fence - * array is indexed by ring id. @intr selects whether to use - * interruptable (true) or non-interruptable (false) sleep when - * waiting for the fences. Used by the suballocator. - * Returns 0 if any fence has passed, error for all other cases. + * return value: + * 0: seq signaled, and GPU not hung + * -EDEADLK: GPU hang detected + * -EINVAL: some parameter is not valid */ -int amdgpu_fence_wait_any(struct amdgpu_device *adev, - struct amdgpu_fence **fences, - bool intr) +static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) { - uint64_t seq[AMDGPU_MAX_RINGS]; - unsigned i, num_rings = 0; - long r; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - seq[i] = 0; + struct amdgpu_device *adev = ring->adev; + bool signaled = false; - if (!fences[i]) { - continue; - } + BUG_ON(!ring); + if (seq > ring->fence_drv.sync_seq[ring->idx]) + return -EINVAL; - seq[i] = fences[i]->seq; - ++num_rings; - } + if (atomic64_read(&ring->fence_drv.last_seq) >= seq) + return 0; - /* nothing to wait for ?
*/ - if (num_rings == 0) - return -ENOENT; + wait_event(ring->fence_drv.fence_queue, ( + (signaled = amdgpu_fence_seq_signaled(ring, seq)) + || adev->needs_reset)); - r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - return r; - } - return 0; + if (signaled) + return 0; + else + return -EDEADLK; } /** @@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev, */ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) { - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; + uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL; - if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) { - /* nothing to wait for, last_seq is - already the last emited fence */ + if (seq >= ring->fence_drv.sync_seq[ring->idx]) return -ENOENT; - } - r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - return 0; + + return amdgpu_fence_ring_wait_seq(ring, seq); } /** @@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - struct amdgpu_device *adev = ring->adev; - uint64_t seq[AMDGPU_MAX_RINGS] = {}; - long r; + uint64_t seq = ring->fence_drv.sync_seq[ring->idx]; - seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx]; - if (!seq[ring->idx]) + if (!seq) return 0; - r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT); - if (r < 0) { - if (r == -EDEADLK) - return -EDEADLK; - - dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n", - ring->idx, r); - } - return 0; + return amdgpu_fence_ring_wait_seq(ring, seq); } /** @@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; } amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq)); - ring->fence_drv.initialized = true; + amdgpu_irq_get(adev, irq_src, irq_type); + ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; + ring->fence_drv.initialized = true; + dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " "cpu addr 0x%p\n", ring->idx, ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); @@ -951,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, * Init the fence driver for the requested ring (all asics). * Helper function for amdgpu_fence_driver_init(). 
*/ -void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) { - int i; + int i, r; ring->fence_drv.cpu_addr = NULL; ring->fence_drv.gpu_addr = 0; @@ -966,6 +624,20 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, amdgpu_fence_check_lockup); ring->fence_drv.ring = ring; + + init_waitqueue_head(&ring->fence_drv.fence_queue); + + if (amdgpu_enable_scheduler) { + r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + amdgpu_sched_hw_submission, ring->name); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; + } + } + + return 0; } /** @@ -982,7 +654,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { - init_waitqueue_head(&adev->fence_queue); if (amdgpu_debugfs_fence_init(adev)) dev_err(adev->dev, "fence debugfs file creation failed\n"); @@ -1011,12 +682,76 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(adev); } - wake_up_all(&adev->fence_queue); + wake_up_all(&ring->fence_drv.fence_queue); + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + amd_sched_fini(&ring->sched); ring->fence_drv.initialized = false; } mutex_unlock(&adev->ring_lock); } +/** + * amdgpu_fence_driver_suspend - suspend the fence driver + * for all possible rings. + * + * @adev: amdgpu device pointer + * + * Suspend the fence driver for all possible rings (all asics). + */ +void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) +{ + int i, r; + + mutex_lock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) + continue; + + /* wait for gpu to finish processing current batch */ + r = amdgpu_fence_wait_empty(ring); + if (r) { + /* delay GPU reset to resume */ + amdgpu_fence_driver_force_completion(adev); + } + + /* disable the interrupt */ + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + } + mutex_unlock(&adev->ring_lock); +} + +/** + * amdgpu_fence_driver_resume - resume the fence driver + * for all possible rings. + * + * @adev: amdgpu device pointer + * + * Resume the fence driver for all possible rings (all asics). + * Not all asics have all rings, so each asic will only + * start the fence driver on the rings it has using + * amdgpu_fence_driver_start_ring().
+ */ +void amdgpu_fence_driver_resume(struct amdgpu_device *adev) +{ + int i; + + mutex_lock(&adev->ring_lock); + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) + continue; + + /* enable the interrupt */ + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + } + mutex_unlock(&adev->ring_lock); +} + /** * amdgpu_fence_driver_force_completion - force all fence waiter to complete * @@ -1104,6 +839,21 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count) +{ + int idx; + struct fence *fence; + + for (idx = 0; idx < count; ++idx) { + fence = fences[idx]; + if (fence) { + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + } + } + return false; +} + struct amdgpu_wait_cb { struct fence_cb base; struct task_struct *task; @@ -1121,12 +871,48 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, { struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_device *adev = fence->ring->adev; - struct amdgpu_wait_cb cb; - cb.task = current; + return amdgpu_fence_wait_any(adev, &f, 1, intr, t); +} + +/** + * Wait on a fence array with a timeout + * + * @adev: amdgpu device + * @array: the array of amdgpu fence pointers + * @count: the number of fences in the array + * @intr: if true, use an interruptible sleep for the current task + * @t: timeout to wait + * + * It will return when any fence is signaled or when the timeout expires. + */ +signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, + struct fence **array, uint32_t count, + bool intr, signed long t) +{ + struct amdgpu_wait_cb *cb; + struct fence *fence; + unsigned idx; + + BUG_ON(!array); - if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb)) - return t; + cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL); + if (cb == NULL) { + t = -ENOMEM; + goto err_free_cb; + } + + for (idx = 0; idx < count; ++idx) { + fence = array[idx]; + if (fence) { + cb[idx].task = current; + if (fence_add_callback(fence, + &cb[idx].base, amdgpu_fence_wait_cb)) { + /* The fence is already signaled */ + goto fence_rm_cb; + } + } + } while (t > 0) { if (intr)
&adev->gart.robj); + PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &adev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 4afc50782..7297ca3a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, } } retry: - r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); + r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, + flags, NULL, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, &args->data.data_size_bytes, &args->data.flags); } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { + if (args->data.data_size_bytes > sizeof(args->data.data)) { + r = -EINVAL; + goto unreserve; + } r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); if (!r) r = amdgpu_bo_set_metadata(robj, args->data.data, @@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, args->data.flags); } +unreserve: amdgpu_bo_unreserve(robj); out: drm_gem_object_unreference_unlocked(gobj); @@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct ttm_validate_buffer tv, *entry; struct amdgpu_bo_list_entry *vm_bos; struct ww_acquire_ctx ticket; - struct list_head list; + struct list_head list, duplicates; unsigned domain; int r; INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&duplicates); tv.bo = &bo_va->bo->tbo; tv.shared = true; @@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + /* Provide duplicates to avoid -EALREADY */ + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) goto error_free; @@ -615,6 +623,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; info.domains = robj->initial_domain; info.domain_flags = robj->flags; + amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) r = -EFAULT; break; @@ -622,17 +631,19 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, case AMDGPU_GEM_OP_SET_PLACEMENT: if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { r = -EPERM; + amdgpu_bo_unreserve(robj); break; } robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU); + amdgpu_bo_unreserve(robj); break; default: + amdgpu_bo_unreserve(robj); r = -EINVAL; } - amdgpu_bo_unreserve(robj); out: drm_gem_object_unreference_unlocked(gobj); return r; @@ -648,12 +659,13 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, int r; args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); - args->size = args->pitch * args->height; + args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_VRAM, - 0, ttm_bo_type_device, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + ttm_bo_type_device, &gobj); if (r) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index bc0fac618..c439735ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -73,28 +73,12 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, if (!vm) ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); - else - ib->gpu_addr = 0; - - } else { - ib->sa_bo = NULL; - ib->ptr = NULL; - ib->gpu_addr = 0; } amdgpu_sync_create(&ib->sync); ib->ring = ring; - ib->fence = NULL; - ib->user = NULL; ib->vm = vm; - ib->gds_base = 0; - ib->gds_size = 0; - ib->gws_base = 0; - ib->gws_size = 0; - ib->oa_base = 0; - ib->oa_size = 0; - ib->flags = 0; return 0; } @@ -109,8 +93,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) { - amdgpu_sync_free(adev, &ib->sync, ib->fence); - amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence); + amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); + amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); amdgpu_fence_unref(&ib->fence); } @@ -156,7 +140,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, dev_err(adev->dev, "couldn't schedule ib\n"); return -EINVAL; } - + r = amdgpu_sync_wait(&ibs->sync); + if (r) { + dev_err(adev->dev, "IB sync failed (%d).\n", r); + return r; + } r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); @@ -165,9 +153,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (vm) { /* grab a vm id if necessary */ - struct amdgpu_fence *vm_id_fence = NULL; - vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm); - amdgpu_sync_fence(&ibs->sync, vm_id_fence); + r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); + if (r) { + amdgpu_ring_unlock_undo(ring); + return r; + } } r = amdgpu_sync_rings(&ibs->sync, ring); @@ -212,11 +202,15 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, return r; } + if (!amdgpu_enable_scheduler && ib->ctx) + ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, + &ib->fence->base); + /* wrap the last IB with fence */ if (ib->user) { uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); addr += ib->user->offset; - amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, + amdgpu_ring_emit_fence(ring, addr, ib->sequence, AMDGPU_FENCE_FLAG_64BIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index a8207e5a8..534fc04e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -24,6 +24,7 @@ #include #include "amdgpu.h" #include "amdgpu_ih.h" +#include "amdgpu_amdkfd.h" /** * amdgpu_ih_ring_alloc - allocate memory for the IH ring @@ -42,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, &adev->irq.ih.ring_obj); + NULL, NULL, &adev->irq.ih.ring_obj); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; @@ -193,6 +194,14 @@ restart_ih: rmb(); while (adev->irq.ih.rptr != wptr) { + u32 ring_index = adev->irq.ih.rptr >> 2; + + /* Before dispatching irq to IP blocks, send it to amdkfd */ + amdgpu_amdkfd_interrupt(adev, + (const void *) &adev->irq.ih.ring[ring_index]); + + entry.iv_entry = (const uint32_t *) + &adev->irq.ih.ring[ring_index]; amdgpu_ih_decode_iv(adev, &entry); adev->irq.ih.rptr &= adev->irq.ih.ptr_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index c62b09e55..ba38ae6a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -52,6 +52,7 @@ struct amdgpu_iv_entry { unsigned ring_id; unsigned vm_id; unsigned pas_id; + const uint32_t *iv_entry; }; int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index c098d7620..7c42ff670 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -272,6 +272,11 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) kfree(src->enabled_types); src->enabled_types = NULL; + if (src->data) { + kfree(src->data); + kfree(src); + adev->irq.sources[i] = NULL; + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 8299795f2..17b01aef4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -40,6 +40,7 @@ struct amdgpu_irq_src { unsigned num_types; atomic_t *enabled_types; const struct amdgpu_irq_src_funcs *funcs; + void *data; }; /* provided by interrupt generating IP blocks */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3bfe67de8..5d11e7982 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -34,6 +34,7 @@ #include #include #include +#include "amdgpu_amdkfd.h" #if defined(CONFIG_VGA_SWITCHEROO) bool amdgpu_has_atpx(void); @@ -61,6 +62,8 @@ int amdgpu_driver_unload_kms(struct drm_device *dev) pm_runtime_get_sync(dev->dev); + amdgpu_amdkfd_device_fini(adev); + amdgpu_acpi_fini(adev); amdgpu_device_fini(adev); @@ -93,8 +96,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((amdgpu_runtime_pm != 0) && amdgpu_has_atpx() && - ((flags & AMDGPU_IS_APU) == 0)) - flags |= AMDGPU_IS_PX; + ((flags & AMD_IS_APU) == 0)) + flags |= AMD_IS_PX; /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, @@ -118,6 +121,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } + amdgpu_amdkfd_load_interface(adev); + amdgpu_amdkfd_device_probe(adev); + amdgpu_amdkfd_device_init(adev); + if (amdgpu_device_is_px(dev)) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); @@ -383,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file min((size_t)size, sizeof(vram_gtt))) ? 
-EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { - unsigned n, alloc_size = info->read_mmr_reg.count * 4; + unsigned n, alloc_size; uint32_t *regs; unsigned se_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & @@ -399,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; - regs = kmalloc(alloc_size, GFP_KERNEL); + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; + alloc_size = info->read_mmr_reg.count * sizeof(*regs); for (i = 0; i < info->read_mmr_reg.count; i++) if (amdgpu_asic_read_register(adev, se_num, sh_num, @@ -444,11 +452,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); + dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; @@ -477,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * amdgpu_driver_firstopen_kms - drm callback for last close + * amdgpu_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -485,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void amdgpu_driver_lastclose_kms(struct drm_device *dev) { + struct amdgpu_device *adev = dev->dev_private; + + amdgpu_fbdev_restore_mode(adev); vga_switcheroo_process_delayed_switch(); } @@ -520,10 +531,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) mutex_init(&fpriv->bo_list_lock); idr_init(&fpriv->bo_list_handles); - /* init context manager */ - mutex_init(&fpriv->ctx_mgr.lock); - idr_init(&fpriv->ctx_mgr.ctx_handles); - fpriv->ctx_mgr.adev = adev; + amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); file_priv->driver_priv = fpriv; @@ -556,6 +564,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (!fpriv) return; + amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) @@ -564,9 +574,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, idr_destroy(&fpriv->bo_list_handles); mutex_destroy(&fpriv->bo_list_lock); - /* release context */ - amdgpu_ctx_fini(fpriv); - kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 64efe5b52..7bd470d9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev); void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); int amdgpu_fbdev_total_size(struct amdgpu_device *adev); bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev); void amdgpu_fb_output_poll_changed(struct 
amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8da64245b..1a7708f36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, placements[c].fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; } placements[c].fpfn = 0; placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | @@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct ttm_placement *placement, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; @@ -223,18 +224,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, size_t acc_size; int r; - /* VI has a hw bug where VM PTEs have to be allocated in groups of 8. - * do this as a temporary workaround - */ - if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { - if (adev->asic_type >= CHIP_TOPAZ) { - if (byte_align & 0x7fff) - byte_align = ALIGN(byte_align, 0x8000); - if (size & 0x7fff) - size = ALIGN(size, 0x8000); - } - } - page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; size = ALIGN(size, PAGE_SIZE); @@ -273,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, /* Kernel allocation are uninterruptible */ r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, - acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); + acc_size, sg, resv, &amdgpu_ttm_bo_destroy); if (unlikely(r != 0)) { return r; } @@ -287,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, - struct sg_table *sg, struct amdgpu_bo **bo_ptr) + struct sg_table *sg, + struct reservation_object *resv, + struct amdgpu_bo **bo_ptr) { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; @@ -298,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, amdgpu_ttm_placement_init(adev, &placement, placements, domain, flags); - return amdgpu_bo_create_restricted(adev, size, byte_align, - kernel, domain, flags, - sg, - &placement, - bo_ptr); + return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, + domain, flags, sg, &placement, + resv, bo_ptr); } int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) @@ -462,7 +451,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ - if (0 && (adev->flags & AMDGPU_IS_APU)) { + if (0 && (adev->flags & AMD_IS_APU)) { /* Useless to evict on IGP chips */ return 0; } @@ -478,7 +467,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev) } dev_err(adev->dev, "Userspace still has active objects !\n"); list_for_each_entry_safe(bo, n, &adev->gem.objects, list) { - mutex_lock(&adev->ddev->struct_mutex); dev_err(adev->dev, "%p %p %lu %lu force free\n", &bo->gem_base, bo, (unsigned long)bo->gem_base.size, *((unsigned long *)&bo->gem_base.refcount)); @@ -486,8 +474,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev) list_del_init(&bo->list); mutex_unlock(&bo->adev->gem.mutex); /* this should unref the ttm bo */ - 
drm_gem_object_unreference(&bo->gem_base); - mutex_unlock(&adev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(&bo->gem_base); } } @@ -549,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (metadata == NULL) return -EINVAL; - buffer = kzalloc(metadata_size, GFP_KERNEL); + buffer = kmemdup(metadata, metadata_size, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; - memcpy(buffer, metadata, metadata_size); - kfree(bo->metadata); bo->metadata_flags = flags; bo->metadata = buffer; @@ -658,13 +643,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) * @shared: true if fence should be added shared * */ -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared) { struct reservation_object *resv = bo->tbo.resv; if (shared) - reservation_object_add_shared_fence(resv, &fence->base); + reservation_object_add_shared_fence(resv, fence); else - reservation_object_add_excl_fence(resv, &fence->base); + reservation_object_add_excl_fence(resv, fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 675bdc30e..3c2ff4567 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct ttm_placement *placement, + struct reservation_object *resv, struct amdgpu_bo **bo_ptr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); @@ -161,7 +163,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); -void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence, +void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); /* @@ -193,7 +195,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, unsigned size, unsigned align); void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, - struct amdgpu_fence *fence); + struct fence *fence); #if defined(CONFIG_DEBUG_FS) void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ed13baa7c..22a8c7d3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -82,7 +82,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, mutex_unlock(&adev->pm.mutex); /* Can't set dpm state when the card is off */ - if (!(adev->flags & AMDGPU_IS_PX) || + if (!(adev->flags & AMD_IS_PX) || (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) amdgpu_pm_compute_clocks(adev); fail: @@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr 
== &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ @@ -538,7 +542,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) /* vce just modifies an existing state so force a change */ if (ps->vce_active != adev->pm.dpm.vce_active) goto force; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* for APUs if the num crtcs changed but state is the same, * all we need to do is update the display configuration. */ @@ -580,7 +584,6 @@ force: amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } - mutex_lock(&adev->ddev->struct_mutex); mutex_lock(&adev->ring_lock); /* update whether vce is active */ @@ -628,7 +631,6 @@ force: done: mutex_unlock(&adev->ring_lock); - mutex_unlock(&adev->ddev->struct_mutex); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) @@ -693,6 +695,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { int ret; + if (adev->pm.sysfs_initialized) + return 0; + if (adev->pm.funcs->get_temperature == NULL) return 0; adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, @@ -721,6 +726,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) return ret; } + adev->pm.sysfs_initialized = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index d9652fe32..59f735a93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { + struct reservation_object *resv = attach->dmabuf->resv; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_bo *bo; int ret; + ww_mutex_lock(&resv->lock, NULL); ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 855e21966..30dce235d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -131,6 +131,21 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) return 0; } +/** amdgpu_ring_insert_nop - insert NOP packets + * + * @ring: amdgpu_ring structure holding ring information + * @count: the number of NOP packets to insert + * + * This is the generic insert_nop function for rings except SDMA + */ +void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + for (i = 0; i < count; i++) + amdgpu_ring_write(ring, ring->nop); +} + /** * amdgpu_ring_commit - tell the GPU to execute the new * commands on the ring buffer @@ -143,10 +158,13 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw) */ void amdgpu_ring_commit(struct amdgpu_ring *ring) { + uint32_t count; + /* We pad to match fetch size */ - while (ring->wptr & ring->align_mask) { - amdgpu_ring_write(ring, ring->nop); - } + count = ring->align_mask + 1 - (ring->wptr & ring->align_mask); + count %= ring->align_mask + 1; + ring->funcs->insert_nop(ring, count); + mb(); 
amdgpu_ring_set_wptr(ring); } @@ -339,7 +357,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - amdgpu_fence_driver_init_ring(ring); + r = amdgpu_fence_driver_init_ring(ring); + if (r) + return r; } r = amdgpu_wb_get(adev, &ring->rptr_offs); @@ -367,7 +387,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - + spin_lock_init(&ring->fence_lock); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); @@ -387,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, if (ring->ring_obj == NULL) { r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, &ring->ring_obj); + NULL, NULL, &ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index eb20987ce..e90712443 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&sa_manager->flist[i]); } - r = amdgpu_bo_create(adev, size, align, true, - domain, 0, NULL, &sa_manager->bo); + r = amdgpu_bo_create(adev, size, align, true, domain, + 0, NULL, NULL, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; @@ -139,6 +139,25 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, return r; } +static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f) +{ + struct amdgpu_fence *a_fence; + struct amd_sched_fence *s_fence; + + s_fence = to_amd_sched_fence(f); + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->idx; + } + + a_fence = to_amdgpu_fence(f); + if (a_fence) + return a_fence->ring->idx; + return 0; +} + static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) { struct amdgpu_sa_manager *sa_manager = sa_bo->manager; @@ -147,7 +166,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) } list_del_init(&sa_bo->olist); list_del_init(&sa_bo->flist); - amdgpu_fence_unref(&sa_bo->fence); + fence_put(sa_bo->fence); kfree(sa_bo); } @@ -160,7 +179,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { - if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) { + if (sa_bo->fence == NULL || + !fence_is_signaled(sa_bo->fence)) { return; } amdgpu_sa_bo_remove_locked(sa_bo); @@ -245,7 +265,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, } static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_fence **fences, + struct fence **fences, unsigned *tries) { struct amdgpu_sa_bo *best_bo = NULL; @@ -274,7 +294,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, sa_bo = list_first_entry(&sa_manager->flist[i], struct amdgpu_sa_bo, flist); - if (!amdgpu_fence_signaled(sa_bo->fence)) { + if (!fence_is_signaled(sa_bo->fence)) { fences[i] = sa_bo->fence; continue; } @@ -298,7 +318,8 @@ 
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, } if (best_bo) { - ++tries[best_bo->fence->ring->idx]; + uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence); + ++tries[idx]; sa_manager->hole = best_bo->olist.prev; /* we knew that this one is signaled, @@ -314,9 +335,10 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, unsigned size, unsigned align) { - struct amdgpu_fence *fences[AMDGPU_MAX_RINGS]; + struct fence *fences[AMDGPU_MAX_RINGS]; unsigned tries[AMDGPU_MAX_RINGS]; int i, r; + signed long t; BUG_ON(align > sa_manager->align); BUG_ON(size > sa_manager->size); @@ -350,7 +372,9 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); spin_unlock(&sa_manager->wq.lock); - r = amdgpu_fence_wait_any(adev, fences, false); + t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS, + false, MAX_SCHEDULE_TIMEOUT); + r = (t > 0) ? 0 : t; spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ if (r == -ENOENT) { @@ -369,7 +393,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, } void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, - struct amdgpu_fence *fence) + struct fence *fence) { struct amdgpu_sa_manager *sa_manager; @@ -379,10 +403,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, sa_manager = (*sa_bo)->manager; spin_lock(&sa_manager->wq.lock); - if (fence && !amdgpu_fence_signaled(fence)) { - (*sa_bo)->fence = amdgpu_fence_ref(fence); - list_add_tail(&(*sa_bo)->flist, - &sa_manager->flist[fence->ring->idx]); + if (fence && !fence_is_signaled(fence)) { + uint32_t idx; + (*sa_bo)->fence = fence_get(fence); + idx = amdgpu_sa_get_ring_from_fence(fence); + list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); } else { amdgpu_sa_bo_remove_locked(*sa_bo); } @@ -392,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, } #if defined(CONFIG_DEBUG_FS) + +static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(fence); + struct amd_sched_fence *s_fence = to_amd_sched_fence(fence); + + if (a_fence) + seq_printf(m, " protected by 0x%016llx on ring %d", + a_fence->seq, a_fence->ring->idx); + + if (s_fence) { + struct amdgpu_ring *ring; + + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + seq_printf(m, " protected by 0x%016x on ring %d", + s_fence->base.seqno, ring->idx); + } +} + void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m) { @@ -408,10 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, } seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", soffset, eoffset, eoffset - soffset); - if (i->fence) { - seq_printf(m, " protected by 0x%016llx on ring %d", - i->fence->seq, i->fence->ring->idx); - } + if (i->fence) + amdgpu_sa_bo_dump_fence(i->fence, m); seq_printf(m, "\n"); } spin_unlock(&sa_manager->wq.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c new file mode 100644 index 000000000..2e946b2ca --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -0,0 +1,113 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <drm/drmP.h> +#include "amdgpu.h" + +static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) +{ + struct amdgpu_job *job = to_amdgpu_job(sched_job); + return amdgpu_sync_get_fence(&job->ibs->sync); +} + +static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) +{ + struct amdgpu_fence *fence = NULL; + struct amdgpu_job *job; + int r; + + if (!sched_job) { + DRM_ERROR("job is null\n"); + return NULL; + } + job = to_amdgpu_job(sched_job); + mutex_lock(&job->job_lock); + r = amdgpu_ib_schedule(job->adev, + job->num_ibs, + job->ibs, + job->base.owner); + if (r) { + DRM_ERROR("Error scheduling IBs (%d)\n", r); + goto err; + } + + fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); + +err: + if (job->free_job) + job->free_job(job); + + mutex_unlock(&job->job_lock); + fence_put(&job->base.s_fence->base); + kfree(job); + return fence ?
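/* (Editor's note: amdgpu_sched_dependency() and amdgpu_sched_run_job()
 * above are the two callbacks the shared amd GPU scheduler drives. In
 * rough outline -- a sketch of the contract with hypothetical helper
 * names, not the scheduler's actual code -- the core does:
 *
 *	while ((job = next_ready_job(sched))) {       // hypothetical name
 *		struct fence *dep;
 *		while ((dep = sched->ops->dependency(job)))
 *			wait_or_defer_on(dep);        // hypothetical name
 *		hw_fence = sched->ops->run_job(job);  // submit to the ring
 *	}
 *
 * dependency() hands back unsignaled fences one at a time until the job
 * may run, and run_job() returns the hardware fence from which the
 * scheduler later signals its own amd_sched_fence.)
 */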
&fence->base : NULL; +} + +struct amd_sched_backend_ops amdgpu_sched_ops = { + .dependency = amdgpu_sched_dependency, + .run_job = amdgpu_sched_run_job, +}; + +int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ibs, + unsigned num_ibs, + int (*free_job)(struct amdgpu_job *), + void *owner, + struct fence **f) +{ + int r = 0; + if (amdgpu_enable_scheduler) { + struct amdgpu_job *job = + kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); + if (!job) + return -ENOMEM; + job->base.sched = &ring->sched; + job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; + job->adev = adev; + job->ibs = ibs; + job->num_ibs = num_ibs; + job->base.owner = owner; + mutex_init(&job->job_lock); + job->free_job = free_job; + mutex_lock(&job->job_lock); + r = amd_sched_entity_push_job(&job->base); + if (r) { + mutex_unlock(&job->job_lock); + kfree(job); + return r; + } + *f = fence_get(&job->base.s_fence->base); + mutex_unlock(&job->job_lock); + } else { + r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); + if (r) + return r; + *f = fence_get(&ibs[num_ibs - 1].fence->base); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c index d6d41a42a..ff3ca52ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c @@ -87,7 +87,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, void amdgpu_semaphore_free(struct amdgpu_device *adev, struct amdgpu_semaphore **semaphore, - struct amdgpu_fence *fence) + struct fence *fence) { if (semaphore == NULL || *semaphore == NULL) { return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 21accbdd0..4921de15b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -32,6 +32,11 @@ #include "amdgpu.h" #include "amdgpu_trace.h" +struct amdgpu_sync_entry { + struct hlist_node node; + struct fence *fence; +}; + /** * amdgpu_sync_create - zero init sync object * @@ -49,36 +54,110 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) sync->sync_to[i] = NULL; + hash_init(sync->fences); sync->last_vm_update = NULL; } +static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(f); + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + + if (a_fence) + return a_fence->ring->adev == adev; + + if (s_fence) { + struct amdgpu_ring *ring; + + ring = container_of(s_fence->sched, struct amdgpu_ring, sched); + return ring->adev == adev; + } + + return false; +} + +static bool amdgpu_sync_test_owner(struct fence *f, void *owner) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(f); + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + if (s_fence) + return s_fence->owner == owner; + if (a_fence) + return a_fence->owner == owner; + return false; +} + /** - * amdgpu_sync_fence - use the semaphore to sync to a fence + * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to * @fence: fence to sync to * - * Sync to the fence using the semaphore objects */ -void amdgpu_sync_fence(struct amdgpu_sync *sync, - struct amdgpu_fence *fence) +int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct fence *f) { + struct amdgpu_sync_entry *e; + struct amdgpu_fence *fence; struct amdgpu_fence *other; + struct fence *tmp, 
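/* (Editor's note: a minimal caller-side sketch of the reworked sync API,
 * assuming a reserved BO and a valid ring -- all names as used elsewhere
 * in this patch:
 *
 *	struct amdgpu_sync sync;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner);
 *	if (!r)
 *		r = amdgpu_sync_rings(&sync, ring);  // or amdgpu_sync_wait()
 *	amdgpu_sync_free(adev, &sync, fence);        // fence may be NULL
 *
 * for fences the driver cannot wait on in hardware, the hash added below
 * keeps at most one fence per fence context, always the later one.)
 */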
*later; + + if (!f) + return 0; + + if (amdgpu_sync_same_dev(adev, f) && + amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) { + if (sync->last_vm_update) { + tmp = sync->last_vm_update; + BUG_ON(f->context != tmp->context); + later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp; + sync->last_vm_update = fence_get(later); + fence_put(tmp); + } else + sync->last_vm_update = fence_get(f); + } + + fence = to_amdgpu_fence(f); + if (!fence || fence->ring->adev != adev) { + hash_for_each_possible(sync->fences, e, node, f->context) { + struct fence *new; + if (unlikely(e->fence->context != f->context)) + continue; + new = fence_get(fence_later(e->fence, f)); + if (new) { + fence_put(e->fence); + e->fence = new; + } + return 0; + } + + e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); + if (!e) + return -ENOMEM; - if (!fence) - return; + hash_add(sync->fences, &e->node, f->context); + e->fence = fence_get(f); + return 0; + } other = sync->sync_to[fence->ring->idx]; sync->sync_to[fence->ring->idx] = amdgpu_fence_ref( amdgpu_fence_later(fence, other)); amdgpu_fence_unref(&other); - if (fence->owner == AMDGPU_FENCE_OWNER_VM) { - other = sync->last_vm_update; - sync->last_vm_update = amdgpu_fence_ref( - amdgpu_fence_later(fence, other)); - amdgpu_fence_unref(&other); - } + return 0; +} + +static void *amdgpu_sync_get_owner(struct fence *f) +{ + struct amdgpu_fence *a_fence = to_amdgpu_fence(f); + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + + if (s_fence) + return s_fence->owner; + else if (a_fence) + return a_fence->owner; + return AMDGPU_FENCE_OWNER_UNDEFINED; } /** @@ -97,7 +176,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, { struct reservation_object_list *flist; struct fence *f; - struct amdgpu_fence *fence; + void *fence_owner; unsigned i; int r = 0; @@ -106,11 +185,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, /* always sync to the exclusive fence */ f = reservation_object_get_excl(resv); - fence = f ? to_amdgpu_fence(f) : NULL; - if (fence && fence->ring->adev == adev) - amdgpu_sync_fence(sync, fence); - else if (f) - r = fence_wait(f, true); + r = amdgpu_sync_fence(adev, sync, f); flist = reservation_object_get_list(resv); if (!flist || r) @@ -119,20 +194,86 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], reservation_object_held(resv)); - fence = f ? to_amdgpu_fence(f) : NULL; - if (fence && fence->ring->adev == adev) { - if (fence->owner != owner || - fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED) - amdgpu_sync_fence(sync, fence); - } else if (f) { - r = fence_wait(f, true); - if (r) - break; + if (amdgpu_sync_same_dev(adev, f)) { + /* VM updates are only interesting + * for other VM updates and moves. + */ + fence_owner = amdgpu_sync_get_owner(f); + if ((owner != AMDGPU_FENCE_OWNER_MOVE) && + (fence_owner != AMDGPU_FENCE_OWNER_MOVE) && + ((owner == AMDGPU_FENCE_OWNER_VM) != + (fence_owner == AMDGPU_FENCE_OWNER_VM))) + continue; + + /* Ignore fence from the same owner as + * long as it isn't undefined. 
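 *
 * (Editor's note: combined, the two skips in this loop act like the
 * following predicate -- a sketch under a hypothetical name, not part
 * of the patch:
 *
 *	static bool amdgpu_sync_should_skip(void *owner, void *fence_owner)
 *	{
 *		// VM updates need only order against other VM updates
 *		// and buffer moves, never against ordinary CS fences
 *		if (owner != AMDGPU_FENCE_OWNER_MOVE &&
 *		    fence_owner != AMDGPU_FENCE_OWNER_MOVE &&
 *		    (owner == AMDGPU_FENCE_OWNER_VM) !=
 *		    (fence_owner == AMDGPU_FENCE_OWNER_VM))
 *			return true;
 *
 *		// work from one known owner is already ordered on its ring
 *		return owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
 *		       fence_owner == owner;
 *	}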
+ */ + if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && + fence_owner == owner) + continue; } + + r = amdgpu_sync_fence(adev, sync, f); + if (r) + break; } return r; } +struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) +{ + struct amdgpu_sync_entry *e; + struct hlist_node *tmp; + struct fence *f; + int i; + + hash_for_each_safe(sync->fences, i, tmp, e, node) { + + f = e->fence; + + hash_del(&e->node); + kfree(e); + + if (!fence_is_signaled(f)) + return f; + + fence_put(f); + } + return NULL; +} + +int amdgpu_sync_wait(struct amdgpu_sync *sync) +{ + struct amdgpu_sync_entry *e; + struct hlist_node *tmp; + int i, r; + + hash_for_each_safe(sync->fences, i, tmp, e, node) { + r = fence_wait(e->fence, false); + if (r) + return r; + + hash_del(&e->node); + fence_put(e->fence); + kfree(e); + } + + if (amdgpu_enable_semaphores) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_fence *fence = sync->sync_to[i]; + if (!fence) + continue; + + r = fence_wait(&fence->base, false); + if (r) + return r; + } + + return 0; +} + /** * amdgpu_sync_rings - sync ring to all registered fences * @@ -164,9 +305,10 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, return -EINVAL; } - if (count >= AMDGPU_NUM_SYNCS) { + if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || + (count >= AMDGPU_NUM_SYNCS)) { /* not enough room, wait manually */ - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; @@ -186,7 +328,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, if (!amdgpu_semaphore_emit_signal(other, semaphore)) { /* signaling wasn't successful wait manually */ amdgpu_ring_undo(other); - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; @@ -196,7 +338,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { /* waiting wasn't successful wait manually */ amdgpu_ring_undo(other); - r = amdgpu_fence_wait(fence, false); + r = fence_wait(&fence->base, false); if (r) return r; continue; @@ -220,15 +362,23 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, */ void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct amdgpu_fence *fence) + struct fence *fence) { + struct amdgpu_sync_entry *e; + struct hlist_node *tmp; unsigned i; + hash_for_each_safe(sync->fences, i, tmp, e, node) { + hash_del(&e->node); + fence_put(e->fence); + kfree(e); + } + for (i = 0; i < AMDGPU_NUM_SYNCS; ++i) amdgpu_semaphore_free(adev, &sync->semaphores[i], fence); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) amdgpu_fence_unref(&sync->sync_to[i]); - amdgpu_fence_unref(&sync->last_vm_update); + fence_put(sync->last_vm_update); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index df202999f..4865615e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_cleanup; } - r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, &vram_obj); + r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, + NULL, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -77,10 +78,11 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) void *gtt_map, *vram_map; void **gtt_start, **gtt_end; void **vram_start, **vram_end; - struct amdgpu_fence *fence = NULL; + struct fence *fence = 
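/* (Editor's note: after the amdgpu_sync_rings() hunks above, inter-ring
 * synchronization degrades gracefully -- in outline, with the checks in
 * the order the code applies them:
 *
 *	if (amdgpu_enable_scheduler ||           // scheduler handles deps
 *	    !amdgpu_enable_semaphores ||         // semaphores disabled
 *	    count >= AMDGPU_NUM_SYNCS)           // out of semaphore slots
 *		fence_wait(&fence->base, false); // plain CPU-side wait
 *	else
 *		emit_semaphore_pair(other, ring); // hypothetical shorthand
 *
 * so HW semaphores are now only used when explicitly enabled and a slot
 * is free; every other path falls back to fence waits.)
 */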
NULL; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + NULL, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; } @@ -116,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); goto out_lclean_unpin; } - amdgpu_fence_unref(&fence); + fence_put(fence); r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -161,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); goto out_lclean_unpin; } - amdgpu_fence_unref(&fence); + fence_put(fence); r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { @@ -214,7 +216,7 @@ out_lclean: amdgpu_bo_unref(&gtt_obj[i]); } if (fence) - amdgpu_fence_unref(&fence); + fence_put(fence); break; } @@ -238,7 +240,7 @@ void amdgpu_test_moves(struct amdgpu_device *adev) static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, struct amdgpu_ring *ring, - struct amdgpu_fence **fence) + struct fence **fence) { uint32_t handle = ring->idx ^ 0xdeafbeef; int r; @@ -269,15 +271,16 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev, DRM_ERROR("Failed to get dummy destroy msg\n"); return r; } - } else { + struct amdgpu_fence *a_fence = NULL; r = amdgpu_ring_lock(ring, 64); if (r) { DRM_ERROR("Failed to lock ring A %d\n", ring->idx); return r; } - amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence); + amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence); amdgpu_ring_unlock_commit(ring); + *fence = &a_fence->base; } return 0; } @@ -286,7 +289,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, struct amdgpu_ring *ringA, struct amdgpu_ring *ringB) { - struct amdgpu_fence *fence1 = NULL, *fence2 = NULL; + struct fence *fence1 = NULL, *fence2 = NULL; struct amdgpu_semaphore *semaphore = NULL; int r; @@ -322,7 +325,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fence1)) { + if (fence_is_signaled(fence1)) { DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -335,7 +338,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, amdgpu_semaphore_emit_signal(ringB, semaphore); amdgpu_ring_unlock_commit(ringB); - r = amdgpu_fence_wait(fence1, false); + r = fence_wait(fence1, false); if (r) { DRM_ERROR("Failed to wait for sync fence 1\n"); goto out_cleanup; @@ -343,7 +346,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fence2)) { + if (fence_is_signaled(fence2)) { DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -356,7 +359,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev, amdgpu_semaphore_emit_signal(ringB, semaphore); amdgpu_ring_unlock_commit(ringB); - r = amdgpu_fence_wait(fence2, false); + r = fence_wait(fence2, false); if (r) { DRM_ERROR("Failed to wait for sync fence 1\n"); goto out_cleanup; @@ -366,10 +369,10 @@ out_cleanup: amdgpu_semaphore_free(adev, &semaphore, NULL); if (fence1) - amdgpu_fence_unref(&fence1); + fence_put(fence1); if (fence2) - amdgpu_fence_unref(&fence2); + fence_put(fence2); if (r) printk(KERN_WARNING "Error while testing ring sync
(%d).\n", r); @@ -380,7 +383,7 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, struct amdgpu_ring *ringB, struct amdgpu_ring *ringC) { - struct amdgpu_fence *fenceA = NULL, *fenceB = NULL; + struct fence *fenceA = NULL, *fenceB = NULL; struct amdgpu_semaphore *semaphore = NULL; bool sigA, sigB; int i, r; @@ -416,11 +419,11 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, mdelay(1000); - if (amdgpu_fence_signaled(fenceA)) { + if (fence_is_signaled(fenceA)) { DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); goto out_cleanup; } - if (amdgpu_fence_signaled(fenceB)) { + if (fence_is_signaled(fenceB)) { DRM_ERROR("Fence B signaled without waiting for semaphore.\n"); goto out_cleanup; } @@ -435,8 +438,8 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, for (i = 0; i < 30; ++i) { mdelay(100); - sigA = amdgpu_fence_signaled(fenceA); - sigB = amdgpu_fence_signaled(fenceB); + sigA = fence_is_signaled(fenceA); + sigB = fence_is_signaled(fenceB); if (sigA || sigB) break; } @@ -461,12 +464,12 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev, mdelay(1000); - r = amdgpu_fence_wait(fenceA, false); + r = fence_wait(fenceA, false); if (r) { DRM_ERROR("Failed to wait for sync fence A\n"); goto out_cleanup; } - r = amdgpu_fence_wait(fenceB, false); + r = fence_wait(fenceB, false); if (r) { DRM_ERROR("Failed to wait for sync fence B\n"); goto out_cleanup; @@ -476,10 +479,10 @@ out_cleanup: amdgpu_semaphore_free(adev, &semaphore, NULL); if (fenceA) - amdgpu_fence_unref(&fenceA); + fence_put(fenceA); if (fenceB) - amdgpu_fence_unref(&fenceB); + fence_put(fenceB); if (r) printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dd3415d2e..364cbe975 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -228,7 +228,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, struct amdgpu_device *adev; struct amdgpu_ring *ring; uint64_t old_start, new_start; - struct amdgpu_fence *fence; + struct fence *fence; int r; adev = amdgpu_get_adev(bo->bdev); @@ -269,9 +269,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, new_mem->num_pages * PAGE_SIZE, /* bytes */ bo->resv, &fence); /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, &fence->base, + r = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem); - amdgpu_fence_unref(&fence); + fence_put(fence); return r; } @@ -859,8 +859,9 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, &adev->stollen_vga_memory); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &adev->stollen_vga_memory); if (r) { return r; } @@ -987,46 +988,48 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_sync sync; uint32_t max_bytes; unsigned num_loops, num_dw; + struct amdgpu_ib *ib; unsigned i; int r; - /* sync other rings */ - amdgpu_sync_create(&sync); - if (resv) { - r = amdgpu_sync_resv(adev, &sync, resv, false); - if (r) { - DRM_ERROR("sync failed (%d).\n", r); - amdgpu_sync_free(adev, &sync, NULL); - return r; - } - } - max_bytes = 
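/* (Editor's note: amdgpu_copy_buffer() below is rebuilt on the pattern
 * this patch uses for all kernel-internal submissions:
 *
 *	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
 *	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
 *	// ...fill ib->ptr, sync ib->sync to the reservation object...
 *	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
 *						 free_job_cb, owner, &fence);
 *	// with the scheduler enabled the job owns the IB; otherwise the
 *	// caller frees it after submission
 *
 * replacing the old ring_lock / emit / ring_unlock_commit sequence.)
 */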
adev->mman.buffer_funcs->copy_max_bytes; num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; - /* for fence and sync */ - num_dw += 64 + AMDGPU_NUM_SYNCS * 8; + /* for IB padding */ + while (num_dw & 0x7) + num_dw++; + + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; - r = amdgpu_ring_lock(ring, num_dw); + r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib); if (r) { - DRM_ERROR("ring lock failed (%d).\n", r); - amdgpu_sync_free(adev, &sync, NULL); + kfree(ib); return r; } - amdgpu_sync_rings(&sync, ring); + ib->length_dw = 0; + + if (resv) { + r = amdgpu_sync_resv(adev, &ib->sync, resv, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + goto error_free; + } + } for (i = 0; i < num_loops; i++) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset, + amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset, cur_size_in_bytes); src_offset += cur_size_in_bytes; @@ -1034,17 +1037,24 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, byte_count -= cur_size_in_bytes; } - r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence); - if (r) { - amdgpu_ring_unlock_undo(ring); - amdgpu_sync_free(adev, &sync, NULL); - return r; - } - - amdgpu_ring_unlock_commit(ring); - amdgpu_sync_free(adev, &sync, *fence); + amdgpu_vm_pad_ib(adev, ib); + WARN_ON(ib->length_dw > num_dw); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_MOVE, + fence); + if (r) + goto error_free; + if (!amdgpu_enable_scheduler) { + amdgpu_ib_free(adev, ib); + kfree(ib); + } return 0; +error_free: + amdgpu_ib_free(adev, ib); + kfree(ib); + return r; } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 482e66797..5cc95f1a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) const struct common_firmware_header *header = NULL; err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); err = -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 5b7d80130..40480d874 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -52,6 +52,7 @@ #endif #define FIRMWARE_TONGA "/*(DEBLOBBED)*/" #define FIRMWARE_CARRIZO "/*(DEBLOBBED)*/" +#define FIRMWARE_FIJI "/*(DEBLOBBED)*/" /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -111,6 +112,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_TONGA: fw_name = FIRMWARE_TONGA; break; + case CHIP_FIJI: + fw_name = FIRMWARE_FIJI; + break; case CHIP_CARRIZO: fw_name = FIRMWARE_CARRIZO; break; @@ -144,7 +148,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &adev->uvd.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ 
-211,31 +217,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_suspend(struct amdgpu_device *adev) { - unsigned size; - void *ptr; - const struct common_firmware_header *hdr; - int i; + struct amdgpu_ring *ring = &adev->uvd.ring; + int i, r; if (adev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) - if (atomic_read(&adev->uvd.handles[i])) - break; - - if (i == AMDGPU_MAX_UVD_HANDLES) - return 0; + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { + uint32_t handle = atomic_read(&adev->uvd.handles[i]); + if (handle != 0) { + struct fence *fence; - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + amdgpu_uvd_note_usage(adev); - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - size -= le32_to_cpu(hdr->ucode_size_bytes); + r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); + if (r) { + DRM_ERROR("Error destroying UVD (%d)!\n", r); + continue; + } - ptr = adev->uvd.cpu_addr; - ptr += le32_to_cpu(hdr->ucode_size_bytes); + fence_wait(fence, false); + fence_put(fence); - adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - memcpy(adev->uvd.saved_bo, ptr, size); + adev->uvd.filp[i] = NULL; + atomic_set(&adev->uvd.handles[i], 0); + } + } return 0; } @@ -260,12 +267,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.cpu_addr; ptr += le32_to_cpu(hdr->ucode_size_bytes); - if (adev->uvd.saved_bo != NULL) { - memcpy(ptr, adev->uvd.saved_bo, size); - kfree(adev->uvd.saved_bo); - adev->uvd.saved_bo = NULL; - } else - memset(ptr, 0, size); + memset(ptr, 0, size); return 0; } @@ -278,7 +280,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { uint32_t handle = atomic_read(&adev->uvd.handles[i]); if (handle != 0 && adev->uvd.filp[i] == filp) { - struct amdgpu_fence *fence; + struct fence *fence; amdgpu_uvd_note_usage(adev); @@ -288,8 +290,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) continue; } - amdgpu_fence_wait(fence, false); - amdgpu_fence_unref(&fence); + fence_wait(fence, false); + fence_put(fence); adev->uvd.filp[i] = NULL; atomic_set(&adev->uvd.handles[i], 0); @@ -503,28 +505,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, { struct amdgpu_device *adev = ctx->parser->adev; int32_t *msg, msg_type, handle; - struct fence *f; void *ptr; - - int i, r; + long r; + int i; if (offset & 0x3F) { DRM_ERROR("UVD messages must be 64 byte aligned!\n"); return -EINVAL; } - f = reservation_object_get_excl(bo->tbo.resv); - if (f) { - r = amdgpu_fence_wait((struct amdgpu_fence *)f, false); - if (r) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); - return r; - } + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (r < 0) { + DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); + return r; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); + DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); return r; } @@ -813,14 +812,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return 0; } +static int amdgpu_uvd_free_job( + struct amdgpu_job *job) +{ + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); + return 0; +} + static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, - struct amdgpu_fence **fence) + struct fence **fence) { struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct list_head head; - struct amdgpu_ib ib; + 
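/* (Editor's note: the amdgpu_uvd_cs_msg() hunk above replaces waiting on
 * just the exclusive fence with a wait on the whole reservation object;
 * the general pattern, with the arguments used there:
 *
 *	long r;
 *
 *	r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
 *				true,    // wait_all: shared fences too
 *				false,   // not interruptible
 *				MAX_SCHEDULE_TIMEOUT);
 *	if (r < 0)
 *		return r;    // 0 would mean timeout, positive is success
 *
 * which is also why the local variable switches from int to long.)
 */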
struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t addr; int i, r; @@ -842,34 +851,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (r) goto err; - - r = amdgpu_ib_get(ring, NULL, 64, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) { + r = -ENOMEM; goto err; + } + r = amdgpu_ib_get(ring, NULL, 64, ib); + if (r) + goto err1; addr = amdgpu_bo_gpu_offset(bo); - ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); - ib.ptr[1] = addr; - ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); - ib.ptr[3] = addr >> 32; - ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); - ib.ptr[5] = 0; + ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); + ib->ptr[1] = addr; + ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); + ib->ptr[3] = addr >> 32; + ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); + ib->ptr[5] = 0; for (i = 6; i < 16; ++i) - ib.ptr[i] = PACKET2(0); - ib.length_dw = 16; + ib->ptr[i] = PACKET2(0); + ib->length_dw = 16; - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_uvd_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); if (r) - goto err; - ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); + goto err2; - if (fence) - *fence = amdgpu_fence_ref(ib.fence); + ttm_eu_fence_buffer_objects(&ticket, &head, f); - amdgpu_ib_free(ring->adev, &ib); + if (fence) + *fence = fence_get(f); amdgpu_bo_unref(&bo); - return 0; + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; + amdgpu_ib_free(ring->adev, ib); + kfree(ib); + return 0; +err2: + amdgpu_ib_free(ring->adev, ib); +err1: + kfree(ib); err: ttm_eu_backoff_reservation(&ticket, &head); return r; @@ -879,7 +903,7 @@ err: crash the vcpu so just try to emmit a dummy create/destroy msg to avoid this */ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -887,7 +911,9 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, int r, i; r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &bo); if (r) return r; @@ -926,7 +952,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo; @@ -934,7 +960,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, int r, i; r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 2255aa710..1724c2c86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev); int amdgpu_uvd_suspend(struct amdgpu_device *adev); int amdgpu_uvd_resume(struct amdgpu_device *adev); int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); int 
amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index e136d2947..71feb239d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -48,6 +48,7 @@ #endif #define FIRMWARE_TONGA "/*(DEBLOBBED)*/" #define FIRMWARE_CARRIZO "/*(DEBLOBBED)*/" +#define FIRMWARE_FIJI "/*(DEBLOBBED)*/" #ifdef CONFIG_DRM_AMDGPU_CIK /*(DEBLOBBED)*/ @@ -96,6 +97,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_CARRIZO: fw_name = FIRMWARE_CARRIZO; break; + case CHIP_FIJI: + fw_name = FIRMWARE_FIJI; + break; default: return -EINVAL; @@ -131,7 +135,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) /* allocate firmware, stack and heap BO */ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, &adev->vce.vcpu_bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); return r; @@ -329,6 +335,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } +static int amdgpu_vce_free_job( + struct amdgpu_job *job) +{ + amdgpu_ib_free(job->adev, job->ibs); + kfree(job->ibs); + return 0; +} + /** * amdgpu_vce_get_create_msg - generate a VCE create msg * @@ -340,59 +354,69 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * Open up a stream for HW test */ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib ib; + struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + kfree(ib); return r; } - dummy = ib.gpu_addr + 1024; + dummy = ib->gpu_addr + 1024; /* stitch together an VCE create msg */ - ib.length_dw = 0; - ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ - ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ - ib.ptr[ib.length_dw++] = handle; - - ib.ptr[ib.length_dw++] = 0x00000030; /* len */ - ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ - ib.ptr[ib.length_dw++] = 0x00000000; - ib.ptr[ib.length_dw++] = 0x00000042; - ib.ptr[ib.length_dw++] = 0x0000000a; - ib.ptr[ib.length_dw++] = 0x00000001; - ib.ptr[ib.length_dw++] = 0x00000080; - ib.ptr[ib.length_dw++] = 0x00000060; - ib.ptr[ib.length_dw++] = 0x00000100; - ib.ptr[ib.length_dw++] = 0x00000100; - ib.ptr[ib.length_dw++] = 0x0000000c; - ib.ptr[ib.length_dw++] = 0x00000000; - - ib.ptr[ib.length_dw++] = 0x00000014; /* len */ - ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ - ib.ptr[ib.length_dw++] = upper_32_bits(dummy); - ib.ptr[ib.length_dw++] = dummy; - ib.ptr[ib.length_dw++] = 0x00000001; - - for (i = ib.length_dw; i < ib_size_dw; ++i) - ib.ptr[i] = 0x0; - - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("amdgpu: failed to 
schedule ib (%d).\n", r); - } - + ib->length_dw = 0; + ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ + ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ + ib->ptr[ib->length_dw++] = handle; + + ib->ptr[ib->length_dw++] = 0x00000030; /* len */ + ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000042; + ib->ptr[ib->length_dw++] = 0x0000000a; + ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000080; + ib->ptr[ib->length_dw++] = 0x00000060; + ib->ptr[ib->length_dw++] = 0x00000100; + ib->ptr[ib->length_dw++] = 0x00000100; + ib->ptr[ib->length_dw++] = 0x0000000c; + ib->ptr[ib->length_dw++] = 0x00000000; + + ib->ptr[ib->length_dw++] = 0x00000014; /* len */ + ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ + ib->ptr[ib->length_dw++] = upper_32_bits(dummy); + ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = 0x00000001; + + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vce_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err; if (fence) - *fence = amdgpu_fence_ref(ib.fence); - - amdgpu_ib_free(ring->adev, &ib); - + *fence = fence_get(f); + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; +err: + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } @@ -407,49 +431,59 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, * Close up a stream for HW test or if userspace failed to do so */ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence) + struct fence **fence) { const unsigned ib_size_dw = 1024; - struct amdgpu_ib ib; + struct amdgpu_ib *ib = NULL; + struct fence *f = NULL; + struct amdgpu_device *adev = ring->adev; uint64_t dummy; int i, r; - r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib); + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + + r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); if (r) { + kfree(ib); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; } - dummy = ib.gpu_addr + 1024; + dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ - ib.length_dw = 0; - ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ - ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ - ib.ptr[ib.length_dw++] = handle; - - ib.ptr[ib.length_dw++] = 0x00000014; /* len */ - ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ - ib.ptr[ib.length_dw++] = upper_32_bits(dummy); - ib.ptr[ib.length_dw++] = dummy; - ib.ptr[ib.length_dw++] = 0x00000001; - - ib.ptr[ib.length_dw++] = 0x00000008; /* len */ - ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ - - for (i = ib.length_dw; i < ib_size_dw; ++i) - ib.ptr[i] = 0x0; - - r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - } - + ib->length_dw = 0; + ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ + ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ + ib->ptr[ib->length_dw++] = handle; + + ib->ptr[ib->length_dw++] = 0x00000014; /* len */ + ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ + ib->ptr[ib->length_dw++] = upper_32_bits(dummy); + ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = 0x00000001; + + ib->ptr[ib->length_dw++] = 0x00000008; /* len */ + ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ + + for (i = ib->length_dw; i < 
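/* (Editor's note: the VCE messages stitched together in these two
 * functions are simple {length, command, payload...} records; the
 * session block at the top of each message, for instance, decodes as:
 *
 *	ib->ptr[n + 0] = 0x0000000c;  // record length in bytes (3 dwords)
 *	ib->ptr[n + 1] = 0x00000001;  // session command
 *	ib->ptr[n + 2] = handle;      // stream handle being opened/closed
 *
 * command names are not spelled out in this patch; the trailing loop
 * here zero-pads the remainder of the 1024-dword IB.)
 */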
ib_size_dw; ++i) + ib->ptr[i] = 0x0; + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vce_free_job, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err; if (fence) - *fence = amdgpu_fence_ref(ib.fence); - - amdgpu_ib_free(ring->adev, &ib); - + *fence = fence_get(f); + fence_put(f); + if (amdgpu_enable_scheduler) + return 0; +err: + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } @@ -795,9 +829,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) */ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) { - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; + /* skip vce ring1 ib test for now, since it's not reliable */ + if (ring == &ring->adev->vce.ring[1]) + return 0; + r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) { DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); @@ -810,13 +848,13 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); } error: - amdgpu_fence_unref(&fence); + fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 7ccdb5927..ba2da8ee5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_fence **fence); + struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b07402fc8..53d551f2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, /** * amdgpu_vm_grab_id - allocate the next free VMID * - * @ring: ring we want to submit job to * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies * - * Allocate an id for the vm (cayman+). - * Returns the fence we need to sync to (if any). + * Allocate an id for the vm, adding fences to the sync obj as necessary. * - * Global and local mutex must be locked! + * Global mutex must be locked! 
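 *
 * (Editor's note: with the reworked signature the expected caller flow,
 * sketched from how this patch wires it up elsewhere, is:
 *
 *	r = amdgpu_vm_grab_id(vm, ring, &sync);  // may queue an ID fence
 *	if (r)
 *		return r;
 *	// ...sync is waited on or emitted as part of submission...
 *	amdgpu_vm_flush(ring, vm, sync.last_vm_update);
 *
 * instead of the old return-a-fence-to-wait-on contract.)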
*/ -struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, - struct amdgpu_vm *vm) +int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync) { struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; @@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, /* check if the id is still valid */ if (vm_id->id && vm_id->last_id_use && vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) - return NULL; + return 0; /* we definately need to flush */ vm_id->pd_gpu_addr = ~0ll; @@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, /* found a free one */ vm_id->id = i; trace_amdgpu_vm_grab_id(i, ring->idx); - return NULL; + return 0; } if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { @@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, for (i = 0; i < 2; ++i) { if (choices[i]) { + struct amdgpu_fence *fence; + + fence = adev->vm_manager.active[choices[i]]; vm_id->id = choices[i]; + trace_amdgpu_vm_grab_id(choices[i], ring->idx); - return adev->vm_manager.active[choices[i]]; + return amdgpu_sync_fence(ring->adev, sync, &fence->base); } } /* should never happen */ BUG(); - return NULL; + return -EINVAL; } /** @@ -196,17 +200,29 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, */ void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_vm *vm, - struct amdgpu_fence *updates) + struct fence *updates) { uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; + struct fence *flushed_updates = vm_id->flushed_updates; + bool is_earlier = false; + + if (flushed_updates && updates) { + BUG_ON(flushed_updates->context != updates->context); + is_earlier = (updates->seqno - flushed_updates->seqno <= + INT_MAX) ? 
true : false; + } - if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || - amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) { + if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || + is_earlier) { trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); - amdgpu_fence_unref(&vm_id->flushed_updates); - vm_id->flushed_updates = amdgpu_fence_ref(updates); + if (is_earlier) { + vm_id->flushed_updates = fence_get(updates); + fence_put(flushed_updates); + } + if (!flushed_updates) + vm_id->flushed_updates = fence_get(updates); vm_id->pd_gpu_addr = pd_addr; amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); } @@ -300,6 +316,15 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev, } } +int amdgpu_vm_free_job(struct amdgpu_job *job) +{ + int i; + for (i = 0; i < job->num_ibs; i++) + amdgpu_ib_free(job->adev, &job->ibs[i]); + kfree(job->ibs); + return 0; +} + /** * amdgpu_vm_clear_bo - initially clear the page dir/table * @@ -310,7 +335,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo) { struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; - struct amdgpu_ib ib; + struct fence *fence = NULL; + struct amdgpu_ib *ib; unsigned entries; uint64_t addr; int r; @@ -330,24 +356,33 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; - r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) goto error_unreserve; - ib.length_dw = 0; - - amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0); - amdgpu_vm_pad_ib(adev, &ib); - WARN_ON(ib.length_dw > 64); - - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); + r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); if (r) goto error_free; - amdgpu_bo_fence(bo, ib.fence, true); - + ib->length_dw = 0; + + amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); + amdgpu_vm_pad_ib(adev, ib); + WARN_ON(ib->length_dw > 64); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &fence); + if (!r) + amdgpu_bo_fence(bo, fence, true); + fence_put(fence); + if (amdgpu_enable_scheduler) { + amdgpu_bo_unreserve(bo); + return 0; + } error_free: - amdgpu_ib_free(adev, &ib); + amdgpu_ib_free(adev, ib); + kfree(ib); error_unreserve: amdgpu_bo_unreserve(bo); @@ -400,7 +435,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; - struct amdgpu_ib ib; + struct amdgpu_ib *ib; + struct fence *fence = NULL; + int r; /* padding, etc. 
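 *
 * (Editor's note on the seqno test above: fences from a single context
 * carry a monotonically increasing 32-bit seqno, so "a is not earlier
 * than b" is decided with wrap-safe modular arithmetic, as in this
 * minimal standalone sketch:
 *
 *	static bool seqno_not_earlier(unsigned a, unsigned b)
 *	{
 *		return a - b <= INT_MAX;  // unsigned subtraction wraps
 *	}
 *
 * e.g. a = 2, b = 0xfffffffe gives a - b == 4, so a still counts as the
 * later fence; a plain "a >= b" would get this wrong after wrap-around.)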
*/ @@ -413,10 +450,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (ndw > 0xfffff) return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); - if (r) + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; + + r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); + if (r) { + kfree(ib); return r; - ib.length_dw = 0; + } + ib->length_dw = 0; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -436,7 +479,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ((last_pt + incr * count) != pt)) { if (count) { - amdgpu_vm_update_pages(adev, &ib, last_pde, + amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID, 0); } @@ -450,23 +493,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count, + amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID, 0); - if (ib.length_dw != 0) { - amdgpu_vm_pad_ib(adev, &ib); - amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(ib.length_dw > ndw); - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); - if (r) { - amdgpu_ib_free(adev, &ib); - return r; - } - amdgpu_bo_fence(pd, ib.fence, true); + if (ib->length_dw != 0) { + amdgpu_vm_pad_ib(adev, ib); + amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); + WARN_ON(ib->length_dw > ndw); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &fence); + if (r) + goto error_free; + + amdgpu_bo_fence(pd, fence, true); + fence_put(vm->page_directory_fence); + vm->page_directory_fence = fence_get(fence); + fence_put(fence); + } + + if (!amdgpu_enable_scheduler || ib->length_dw == 0) { + amdgpu_ib_free(adev, ib); + kfree(ib); } - amdgpu_ib_free(adev, &ib); return 0; + +error_free: + amdgpu_ib_free(adev, ib); + kfree(ib); + return r; } /** @@ -572,9 +629,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, { uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; uint64_t last_pte = ~0, last_dst = ~0; + void *owner = AMDGPU_FENCE_OWNER_VM; unsigned count = 0; uint64_t addr; + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; + /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> amdgpu_vm_block_size; @@ -583,8 +645,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, uint64_t pte; int r; - amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, - AMDGPU_FENCE_OWNER_VM); + amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); r = reservation_object_reserve_shared(pt->tbo.resv); if (r) return r; @@ -626,31 +687,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, return 0; } -/** - * amdgpu_vm_fence_pts - fence page tables after an update - * - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * @fence: fence to use - * - * Fence the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! 
- */ -static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, - uint64_t start, uint64_t end, - struct amdgpu_fence *fence) -{ - unsigned i; - - start >>= amdgpu_vm_block_size; - end >>= amdgpu_vm_block_size; - - for (i = start; i <= end; ++i) - amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); -} - /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * @@ -670,12 +706,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, uint64_t addr, uint32_t gtt_flags, - struct amdgpu_fence **fence) + struct fence **fence) { struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; unsigned nptes, ncmds, ndw; uint32_t flags = gtt_flags; - struct amdgpu_ib ib; + struct amdgpu_ib *ib; + struct fence *f = NULL; int r; /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here @@ -722,46 +759,53 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (ndw > 0xfffff) return -ENOMEM; - r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); - if (r) - return r; - ib.length_dw = 0; - - if (!(flags & AMDGPU_PTE_VALID)) { - unsigned i; + ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); + if (!ib) + return -ENOMEM; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_fence *f = vm->ids[i].last_id_use; - amdgpu_sync_fence(&ib.sync, f); - } + r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); + if (r) { + kfree(ib); + return r; } - r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start, + ib->length_dw = 0; + + r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, mapping->it.last + 1, addr + mapping->offset, flags, gtt_flags); if (r) { - amdgpu_ib_free(adev, &ib); + amdgpu_ib_free(adev, ib); + kfree(ib); return r; } - amdgpu_vm_pad_ib(adev, &ib); - WARN_ON(ib.length_dw > ndw); + amdgpu_vm_pad_ib(adev, ib); + WARN_ON(ib->length_dw > ndw); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, + &amdgpu_vm_free_job, + AMDGPU_FENCE_OWNER_VM, + &f); + if (r) + goto error_free; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); - if (r) { - amdgpu_ib_free(adev, &ib); - return r; - } - amdgpu_vm_fence_pts(vm, mapping->it.start, - mapping->it.last + 1, ib.fence); + amdgpu_bo_fence(vm->page_directory, f, true); if (fence) { - amdgpu_fence_unref(fence); - *fence = amdgpu_fence_ref(ib.fence); + fence_put(*fence); + *fence = fence_get(f); + } + fence_put(f); + if (!amdgpu_enable_scheduler) { + amdgpu_ib_free(adev, ib); + kfree(ib); } - amdgpu_ib_free(adev, &ib); - return 0; + +error_free: + amdgpu_ib_free(adev, ib); + kfree(ib); + return r; } /** @@ -794,21 +838,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, addr = 0; } - if (addr == bo_va->addr) - return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - list_for_each_entry(mapping, &bo_va->mappings, list) { + spin_lock(&vm->status_lock); + if (!list_empty(&bo_va->vm_status)) + list_splice_init(&bo_va->valids, &bo_va->invalids); + spin_unlock(&vm->status_lock); + + list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, flags, &bo_va->last_pt_update); if (r) return r; } - bo_va->addr = addr; spin_lock(&vm->status_lock); + list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); + if (!mem) + list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); return 0; @@ -861,7 +909,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync) { struct 
amdgpu_bo_va *bo_va = NULL; - int r; + int r = 0; spin_lock(&vm->status_lock); while (!list_empty(&vm->invalidated)) { @@ -878,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); if (bo_va) - amdgpu_sync_fence(sync, bo_va->last_pt_update); - return 0; + r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); + + return r; } /** @@ -907,10 +956,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, } bo_va->vm = vm; bo_va->bo = bo; - bo_va->addr = 0; bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); - INIT_LIST_HEAD(&bo_va->mappings); + INIT_LIST_HEAD(&bo_va->valids); + INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->vm_status); mutex_lock(&vm->mutex); @@ -999,12 +1048,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, mapping->offset = offset; mapping->flags = flags; - list_add(&mapping->list, &bo_va->mappings); + list_add(&mapping->list, &bo_va->invalids); interval_tree_insert(&mapping->it, &vm->va); trace_amdgpu_vm_bo_map(bo_va, mapping); - bo_va->addr = 0; - /* Make sure the page tables are allocated */ saddr >>= amdgpu_vm_block_size; eaddr >>= amdgpu_vm_block_size; @@ -1018,6 +1065,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* walk over the address space and allocate the page tables */ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { + struct reservation_object *resv = vm->page_directory->tbo.resv; struct amdgpu_bo *pt; if (vm->page_tables[pt_idx].bo) @@ -1026,9 +1074,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* drop mutex to allocate and clear page table */ mutex_unlock(&vm->mutex); + ww_mutex_lock(&resv->lock, NULL); r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + NULL, resv, &pt); + ww_mutex_unlock(&resv->lock); if (r) goto error_free; @@ -1085,17 +1137,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->vm; + bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; - list_for_each_entry(mapping, &bo_va->mappings, list) { + list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->it.start == saddr) break; } - if (&mapping->list == &bo_va->mappings) { - amdgpu_bo_unreserve(bo_va->bo); - return -ENOENT; + if (&mapping->list == &bo_va->valids) { + valid = false; + + list_for_each_entry(mapping, &bo_va->invalids, list) { + if (mapping->it.start == saddr) + break; + } + + if (&mapping->list == &bo_va->invalids) { + amdgpu_bo_unreserve(bo_va->bo); + return -ENOENT; + } } mutex_lock(&vm->mutex); @@ -1103,12 +1165,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, interval_tree_remove(&mapping->it, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (bo_va->addr) { - /* clear the old address */ + if (valid) list_add(&mapping->list, &vm->freed); - } else { + else kfree(mapping); - } mutex_unlock(&vm->mutex); amdgpu_bo_unreserve(bo_va->bo); @@ -1139,16 +1199,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_del(&bo_va->vm_status); spin_unlock(&vm->status_lock); - list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { + list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); interval_tree_remove(&mapping->it, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); - if (bo_va->addr) - list_add(&mapping->list, &vm->freed); - else - kfree(mapping); + list_add(&mapping->list, &vm->freed); + } + 
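/* (Editor's note: schematically, the valids/invalids split introduced in
 * this file replaces the old bo_va->addr flag:
 *
 *	amdgpu_vm_bo_map()        -> list_add(mapping, &bo_va->invalids);
 *	amdgpu_vm_bo_update()     -> write the PTEs, then
 *	                             list_splice(&bo_va->invalids,
 *	                                         &bo_va->valids);
 *	amdgpu_vm_bo_invalidate() -> put bo_va on vm->invalidated so the
 *	                             next submission revalidates it
 *
 * "valid" thus means "currently written into the page tables", which is
 * why in amdgpu_vm_bo_rmv() here valid mappings must go through
 * vm->freed while invalid ones are simply kfreed.)
 */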
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ interval_tree_remove(&mapping->it, &vm->va);
+ kfree(mapping);
 }
- amdgpu_fence_unref(&bo_va->last_pt_update);
+
+ fence_put(bo_va->last_pt_update);
 kfree(bo_va);
 mutex_unlock(&vm->mutex);
@@ -1169,12 +1232,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 struct amdgpu_bo_va *bo_va;
 list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
+ if (list_empty(&bo_va->vm_status))
 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
 }
}
@@ -1202,6 +1263,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 vm->va = RB_ROOT;
 spin_lock_init(&vm->status_lock);
 INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
 INIT_LIST_HEAD(&vm->freed);
 pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,9 +1277,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 return -ENOMEM;
 }
+ vm->page_directory_fence = NULL;
+
 r = amdgpu_bo_create(adev, pd_size, align, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, &vm->page_directory);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, NULL, &vm->page_directory);
 if (r)
 return r;
@@ -1263,9 +1328,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 kfree(vm->page_tables);
 amdgpu_bo_unref(&vm->page_directory);
+ fence_put(vm->page_directory_fence);
 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+ fence_put(vm->ids[i].flushed_updates);
 amdgpu_fence_unref(&vm->ids[i].last_id_use);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/amdgpu/atom-bits.h
deleted file mode 100644
index e8fae5c77..000000000
--- a/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author: Stanislaw Skowronek
- */
-
-#ifndef ATOM_BITS_H
-#define ATOM_BITS_H
-
-static inline uint8_t get_u8(void *bios, int ptr)
-{
- return ((unsigned char *)bios)[ptr];
-}
-#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
-#define CU8(ptr) get_u8(ctx->bios, (ptr))
-static inline uint16_t get_u16(void *bios, int ptr)
-{
- return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
-}
-#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
-#define CU16(ptr) get_u16(ctx->bios, (ptr))
-static inline uint32_t get_u32(void *bios, int ptr)
-{
- return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
-}
-#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
-#define CU32(ptr) get_u32(ctx->bios, (ptr))
-#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/amdgpu/atom-names.h
deleted file mode 100644
index 6f907a5ff..000000000
--- a/drivers/gpu/drm/amd/amdgpu/atom-names.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
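/*
 * The get_u8/get_u16/get_u32 helpers deleted above assemble multi-byte
 * values one byte at a time, so BIOS image reads stay correct regardless
 * of host endianness or alignment: get_u16() is byte[ptr] OR'ed with
 * byte[ptr+1] << 8, and get_u32() composes two such 16-bit reads. A short
 * worked example under those definitions, with a hypothetical image buffer:
 *
 *	unsigned char bios[] = { 0x78, 0x56, 0x34, 0x12 };
 *	get_u16(bios, 0);	// 0x5678
 *	get_u32(bios, 0);	// 0x12345678
 */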
- * - * Author: Stanislaw Skowronek - */ - -#ifndef ATOM_NAMES_H -#define ATOM_NAMES_H - -#include "atom.h" - -#ifdef ATOM_DEBUG - -#define ATOM_OP_NAMES_CNT 123 -static char *atom_op_names[ATOM_OP_NAMES_CNT] = { -"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL", -"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC", -"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG", -"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL", -"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS", -"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG", -"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS", -"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS", -"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB", -"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT", -"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS", -"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH", -"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL", -"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS", -"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC", -"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB", -"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS", -"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG", -"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB", -"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL", -"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC", -"DEBUG", "CTB_DS", -}; - -#define ATOM_TABLE_NAMES_CNT 74 -static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = { -"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit", -"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit", -"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl", -"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock", -"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice", -"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController", -"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange", -"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl", -"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl", -"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl", -"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl", -"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock", -"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing", -"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source", -"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters", -"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock", -"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection", -"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp", -"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C", -"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection", -"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion", -"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining", -"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl", -"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource", -"MemoryDeviceInit", "EnableYUV", -}; - -#define ATOM_IO_NAMES_CNT 5 -static char *atom_io_names[ATOM_IO_NAMES_CNT] = { -"MM", 
"PLL", "MC", "PCIE", "PCIE PORT", -}; - -#else - -#define ATOM_OP_NAMES_CNT 0 -#define ATOM_TABLE_NAMES_CNT 0 -#define ATOM_IO_NAMES_CNT 0 - -#endif - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/amdgpu/atom-types.h deleted file mode 100644 index 1125b866c..000000000 --- a/drivers/gpu/drm/amd/amdgpu/atom-types.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2008 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Author: Dave Airlie - */ - -#ifndef ATOM_TYPES_H -#define ATOM_TYPES_H - -/* sync atom types to kernel types */ - -typedef uint16_t USHORT; -typedef uint32_t ULONG; -typedef uint8_t UCHAR; - - -#ifndef ATOM_BIG_ENDIAN -#if defined(__BIG_ENDIAN) -#define ATOM_BIG_ENDIAN 1 -#else -#define ATOM_BIG_ENDIAN 0 -#endif -#endif -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/amdgpu/atombios.h deleted file mode 100644 index 44c5d4a4d..000000000 --- a/drivers/gpu/drm/amd/amdgpu/atombios.h +++ /dev/null @@ -1,8555 +0,0 @@ -/* - * Copyright 2006-2007 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - - -/****************************************************************************/ -/*Portion I: Definitions shared between VBIOS and Driver */ -/****************************************************************************/ - -#ifndef _ATOMBIOS_H -#define _ATOMBIOS_H - -#define ATOM_VERSION_MAJOR 0x00020000 -#define ATOM_VERSION_MINOR 0x00000002 - -#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR) - -/* Endianness should be specified before inclusion, - * default to little endian - */ -#ifndef ATOM_BIG_ENDIAN -#error Endian not specified -#endif - -#ifdef _H2INC - #ifndef ULONG - typedef unsigned long ULONG; - #endif - - #ifndef UCHAR - typedef unsigned char UCHAR; - #endif - - #ifndef USHORT - typedef unsigned short USHORT; - #endif -#endif - -#define ATOM_DAC_A 0 -#define ATOM_DAC_B 1 -#define ATOM_EXT_DAC 2 - -#define ATOM_CRTC1 0 -#define ATOM_CRTC2 1 -#define ATOM_CRTC3 2 -#define ATOM_CRTC4 3 -#define ATOM_CRTC5 4 -#define ATOM_CRTC6 5 - -#define ATOM_UNDERLAY_PIPE0 16 -#define ATOM_UNDERLAY_PIPE1 17 - -#define ATOM_CRTC_INVALID 0xFF - -#define ATOM_DIGA 0 -#define ATOM_DIGB 1 - -#define ATOM_PPLL1 0 -#define ATOM_PPLL2 1 -#define ATOM_DCPLL 2 -#define ATOM_PPLL0 2 -#define ATOM_PPLL3 3 - -#define ATOM_EXT_PLL1 8 -#define ATOM_EXT_PLL2 9 -#define ATOM_EXT_CLOCK 10 -#define ATOM_PPLL_INVALID 0xFF - -#define ENCODER_REFCLK_SRC_P1PLL 0 -#define ENCODER_REFCLK_SRC_P2PLL 1 -#define ENCODER_REFCLK_SRC_DCPLL 2 -#define ENCODER_REFCLK_SRC_EXTCLK 3 -#define ENCODER_REFCLK_SRC_INVALID 0xFF - -#define ATOM_SCALER_DISABLE 0 //For Fudo, it's bypass and auto-cengter & no replication -#define ATOM_SCALER_CENTER 1 //For Fudo, it's bypass and auto-center & auto replication -#define ATOM_SCALER_EXPANSION 2 //For Fudo, it's 2 Tap alpha blending mode -#define ATOM_SCALER_MULTI_EX 3 //For Fudo only, it's multi-tap mode only used to drive TV or CV, only used by Bios - -#define ATOM_DISABLE 0 -#define ATOM_ENABLE 1 -#define ATOM_LCD_BLOFF (ATOM_DISABLE+2) -#define ATOM_LCD_BLON (ATOM_ENABLE+2) -#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3) -#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) -#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) -#define ATOM_ENCODER_INIT (ATOM_DISABLE+7) -#define ATOM_INIT (ATOM_DISABLE+7) -#define ATOM_GET_STATUS (ATOM_DISABLE+8) - -#define ATOM_BLANKING 1 -#define ATOM_BLANKING_OFF 0 - - -#define ATOM_CRT1 0 -#define ATOM_CRT2 1 - -#define ATOM_TV_NTSC 1 -#define ATOM_TV_NTSCJ 2 -#define ATOM_TV_PAL 3 -#define ATOM_TV_PALM 4 -#define ATOM_TV_PALCN 5 -#define ATOM_TV_PALN 6 -#define ATOM_TV_PAL60 7 -#define ATOM_TV_SECAM 8 -#define ATOM_TV_CV 16 - -#define ATOM_DAC1_PS2 1 -#define ATOM_DAC1_CV 2 -#define ATOM_DAC1_NTSC 3 -#define ATOM_DAC1_PAL 4 - -#define ATOM_DAC2_PS2 ATOM_DAC1_PS2 -#define ATOM_DAC2_CV ATOM_DAC1_CV -#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC -#define ATOM_DAC2_PAL ATOM_DAC1_PAL - -#define ATOM_PM_ON 0 -#define ATOM_PM_STANDBY 1 -#define ATOM_PM_SUSPEND 2 -#define ATOM_PM_OFF 3 - -// For ATOM_LVDS_INFO_V12 -// Bit0:{=0:single, =1:dual}, -// Bit1 {=0:666RGB, =1:888RGB}, -// Bit2:3:{Grey level} -// Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} -#define ATOM_PANEL_MISC_DUAL 0x00000001 -#define ATOM_PANEL_MISC_888RGB 0x00000002 -#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C -#define ATOM_PANEL_MISC_FPDI 0x00000010 -#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2 -#define ATOM_PANEL_MISC_SPATIAL 0x00000020 -#define ATOM_PANEL_MISC_TEMPORAL 0x00000040 -#define ATOM_PANEL_MISC_API_ENABLED 0x00000080 - -#define MEMTYPE_DDR1 "DDR1" 
-#define MEMTYPE_DDR2 "DDR2"
-#define MEMTYPE_DDR3 "DDR3"
-#define MEMTYPE_DDR4 "DDR4"
-
-#define ASIC_BUS_TYPE_PCI "PCI"
-#define ASIC_BUS_TYPE_AGP "AGP"
-#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS"
-
-//Maximum size of that FireGL flag string
-#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
-
-#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
-#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
-
-#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
-
-#define HW_ASSISTED_I2C_STATUS_FAILURE 2
-#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
-
-#pragma pack(1) // BIOS data must use byte alignment
-
-// Define offset to location of ROM header.
-#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L
-#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
-
-#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
-#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 //including the terminator 0x0!
-#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
-#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
-
-/****************************************************************************/
-// Common header for all tables (Data table, Command table).
-// Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header.
-// And the pointer actually points to this header.
-/****************************************************************************/
-
-typedef struct _ATOM_COMMON_TABLE_HEADER
-{
- USHORT usStructureSize;
- UCHAR ucTableFormatRevision; //Change it when the Parser is not backward compatible
- UCHAR ucTableContentRevision; //Change it only when the table needs to change but the firmware
- //Image can't be updated, while Driver needs to carry the new table!
-}ATOM_COMMON_TABLE_HEADER;
-
-/****************************************************************************/
-// Structure stores the ROM header.
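/*
 * Everything from the #pragma pack(1) above onward describes raw,
 * byte-packed ROM layout, located by file offset rather than by pointer:
 * the 16-bit word at image offset 0x48 (OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER)
 * holds the offset of the ATOM_ROM_HEADER defined next, which in turn
 * carries usMasterCommandTableOffset and usMasterDataTableOffset. A
 * minimal sketch of that walk, assuming the little-endian get_u16()
 * helper from the deleted atom-bits.h and an in-memory copy of the image:
 *
 *	int hdr = get_u16(bios, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
 *	int cmd = get_u16(bios, hdr + offsetof(ATOM_ROM_HEADER,
 *					       usMasterCommandTableOffset));
 *	// cmd is now the image offset of the ATOM_MASTER_COMMAND_TABLE
 */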
-/****************************************************************************/
-typedef struct _ATOM_ROM_HEADER
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR uaFirmWareSignature[4]; //Signature to distinguish between Atombios and non-atombios,
- //atombios should init it as "ATOM", don't change the position
- USHORT usBiosRuntimeSegmentAddress;
- USHORT usProtectedModeInfoOffset;
- USHORT usConfigFilenameOffset;
- USHORT usCRC_BlockOffset;
- USHORT usBIOS_BootupMessageOffset;
- USHORT usInt10Offset;
- USHORT usPciBusDevInitCode;
- USHORT usIoBaseAddress;
- USHORT usSubsystemVendorID;
- USHORT usSubsystemID;
- USHORT usPCI_InfoOffset;
- USHORT usMasterCommandTableOffset;//Offset for SW to get all command table offsets, Don't change the position
- USHORT usMasterDataTableOffset; //Offset for SW to get all data table offsets, Don't change the position
- UCHAR ucExtendedFunctionCode;
- UCHAR ucReserved;
-}ATOM_ROM_HEADER;
-
-//==============================Command Table Portion====================================
-
-
-/****************************************************************************/
-// Structures used in Command.mtb
-/****************************************************************************/
-typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
- USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
- USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
- USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
- USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
- USHORT DIGxEncoderControl; //Only used by Bios
- USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
- USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
- USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
- USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
- USHORT GPIOPinControl; //Atomic Table, only used by Bios
- USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
- USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
- USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
- USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
- USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
- USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
- USHORT MemoryPLLInit; //Atomic Table, used only by Bios
- USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
- USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock - USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios - USHORT SetUniphyInstance; //Atomic Table, only used by Bios - USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2 - USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3 - USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead - USHORT GetConditionalGoldenSetting; //Only used by Bios - USHORT SMC_Init; //Function Table,directly used by various SW components,latest version 1.1 - USHORT PatchMCSetting; //only used by BIOS - USHORT MC_SEQ_Control; //only used by BIOS - USHORT Gfx_Harvesting; //Atomic Table, Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting - USHORT EnableScaler; //Atomic Table, used only by Bios - USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1 - USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios - USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1 - USHORT SetCRTC_Replication; //Atomic Table, used only by Bios - USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios - USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios - USHORT LUT_AutoFill; //Atomic Table, only used by Bios - USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios - USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1 - USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios - USHORT MemoryCleanUp; //Atomic Table, only used by Bios - USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios - USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components - USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components - USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init - USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 
1.1 - USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock - USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock - USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock - USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios - USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock - USHORT MemoryTraining; //Atomic Table, used only by Bios - USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2 - USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1 - USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT ReadEfuseValue; //Atomic Table, directly used by various SW components,latest version 1.1 - USHORT ComputeMemoryClockParam; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" - USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init - USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock - USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender - USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 - USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 - USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 - USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 - USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios - USHORT DPEncoderService; //Function Table,only used by Bios - USHORT GetVoltageInfo; //Function Table,only used by Bios since SI -}ATOM_MASTER_LIST_OF_COMMAND_TABLES; - -// For backward compatible -#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction -#define DPTranslatorControl DIG2EncoderControl -#define UNIPHYTransmitterControl DIG1TransmitterControl -#define LVTMATransmitterControl DIG2TransmitterControl -#define SetCRTC_DPM_State GetConditionalGoldenSetting -#define ASIC_StaticPwrMgtStatusChange SetUniphyInstance -#define HPDInterruptService ReadHWAssistedI2CStatus -#define EnableVGA_Access GetSCLKOverMCLKRatio -#define EnableYUV GetDispObjectInfo -#define DynamicClockGating EnableDispPowerGating -#define SetupHWAssistedI2CStatus ComputeMemoryClockParam -#define DAC2OutputControl ReadEfuseValue - -#define TMDSAEncoderControl PatchMCSetting -#define LVDSEncoderControl MC_SEQ_Control -#define LCD1OutputControl HW_Misc_Operation -#define TV1OutputControl Gfx_Harvesting -#define TVEncoderControl SMC_Init - -typedef struct _ATOM_MASTER_COMMAND_TABLE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; -}ATOM_MASTER_COMMAND_TABLE; - -/****************************************************************************/ -// Structures used in every command table -/****************************************************************************/ -typedef struct _ATOM_TABLE_ATTRIBUTE -{ -#if 
ATOM_BIG_ENDIAN - USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag - USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), - USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), -#else - USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), - USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), - USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag -#endif -}ATOM_TABLE_ATTRIBUTE; - -/****************************************************************************/ -// Common header for all command tables. -// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. -// And the pointer actually points to this header. -/****************************************************************************/ -typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER -{ - ATOM_COMMON_TABLE_HEADER CommonHeader; - ATOM_TABLE_ATTRIBUTE TableAttribute; -}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; - -/****************************************************************************/ -// Structures used by ComputeMemoryEnginePLLTable -/****************************************************************************/ - -#define COMPUTE_MEMORY_PLL_PARAM 1 -#define COMPUTE_ENGINE_PLL_PARAM 2 -#define ADJUST_MC_SETTING_PARAM 3 - -/****************************************************************************/ -// Structures used by AdjustMemoryControllerTable -/****************************************************************************/ -typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ -{ -#if ATOM_BIG_ENDIAN - ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block - ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] - ULONG ulClockFreq:24; -#else - ULONG ulClockFreq:24; - ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] - ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block -#endif -}ATOM_ADJUST_MEMORY_CLOCK_FREQ; -#define POINTER_RETURN_FLAG 0x80 - -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS -{ - ULONG ulClock; //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div - UCHAR ucAction; //0:reserved //1:Memory //2:Engine - UCHAR ucReserved; //may expand to return larger Fbdiv later - UCHAR ucFbDiv; //return value - UCHAR ucPostDiv; //return value -}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; - -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 -{ - ULONG ulClock; //When return, [23:0] return real clock - UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. 
it return ref_div to be written to register - USHORT usFbDiv; //return Feedback value to be written to register - UCHAR ucPostDiv; //return post div to be written to register -}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; - -#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS - -#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value -#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) -#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition -#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change -#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup -#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL -#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK - -#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) -#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition -#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change -#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup -#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL -#define b3DRAM_SELF_REFRESH_EXIT 0x20 //Applicable to DRAM self refresh exit only. 
when set, it means it will go to program DRAM self refresh exit path - -typedef struct _ATOM_COMPUTE_CLOCK_FREQ -{ -#if ATOM_BIG_ENDIAN - ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM - ULONG ulClockFreq:24; // in unit of 10kHz -#else - ULONG ulClockFreq:24; // in unit of 10kHz - ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM -#endif -}ATOM_COMPUTE_CLOCK_FREQ; - -typedef struct _ATOM_S_MPLL_FB_DIVIDER -{ - USHORT usFbDivFrac; - USHORT usFbDiv; -}ATOM_S_MPLL_FB_DIVIDER; - -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 -{ - union - { - ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter - ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter - }; - UCHAR ucRefDiv; //Output Parameter - UCHAR ucPostDiv; //Output Parameter - UCHAR ucCntlFlag; //Output Parameter - UCHAR ucReserved; -}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; - -// ucCntlFlag -#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1 -#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2 -#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4 -#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8 - - -// V4 are only used for APU which PLL outside GPU -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 -{ -#if ATOM_BIG_ENDIAN - ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly - ULONG ulClock:24; //Input= target clock, output = actual clock -#else - ULONG ulClock:24; //Input= target clock, output = actual clock - ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly -#endif -}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; - -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 -{ - union - { - ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter - ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter - }; - UCHAR ucRefDiv; //Output Parameter - UCHAR ucPostDiv; //Output Parameter - union - { - UCHAR ucCntlFlag; //Output Flags - UCHAR ucInputFlag; //Input Flags. 
ucInputFlag[0] - Strobe(1)/Performance(0) mode - }; - UCHAR ucReserved; -}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; - - -typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 -{ - ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter - ULONG ulReserved[2]; -}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6; - -//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag -#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f -#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00 -#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01 - - -typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 -{ - COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider - ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter: PLL FB divider - UCHAR ucPllRefDiv; //Output Parameter: PLL ref divider - UCHAR ucPllPostDiv; //Output Parameter: PLL post divider - UCHAR ucPllCntlFlag; //Output Flags: control flag - UCHAR ucReserved; -}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6; - -//ucPllCntlFlag -#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03 - - -// ucInputFlag -#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode - -// use for ComputeMemoryClockParamTable -typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 -{ - union - { - ULONG ulClock; - ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS) - }; - UCHAR ucDllSpeed; //Output - UCHAR ucPostDiv; //Output - union{ - UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode - UCHAR ucPllCntlFlag; //Output: - }; - UCHAR ucBWCntl; -}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1; - -// definition of ucInputFlag -#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01 -// definition of ucPllCntlFlag -#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03 -#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04 -#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08 -#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10 - -//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL -#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04 - -typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER -{ - ATOM_COMPUTE_CLOCK_FREQ ulClock; - ULONG ulReserved[2]; -}DYNAMICE_MEMORY_SETTINGS_PARAMETER; - -typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER -{ - ATOM_COMPUTE_CLOCK_FREQ ulClock; - ULONG ulMemoryClock; - ULONG ulReserved; -}DYNAMICE_ENGINE_SETTINGS_PARAMETER; - -/****************************************************************************/ -// Structures used by SetEngineClockTable -/****************************************************************************/ -typedef struct _SET_ENGINE_CLOCK_PARAMETERS -{ - ULONG ulTargetEngineClock; //In 10Khz unit -}SET_ENGINE_CLOCK_PARAMETERS; - -typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION -{ - ULONG ulTargetEngineClock; //In 10Khz unit - COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; -}SET_ENGINE_CLOCK_PS_ALLOCATION; - -/****************************************************************************/ -// Structures used by SetMemoryClockTable -/****************************************************************************/ -typedef struct _SET_MEMORY_CLOCK_PARAMETERS -{ - ULONG ulTargetMemoryClock; //In 10Khz unit -}SET_MEMORY_CLOCK_PARAMETERS; - -typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION -{ - ULONG ulTargetMemoryClock; //In 10Khz unit - COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; -}SET_MEMORY_CLOCK_PS_ALLOCATION; - -/****************************************************************************/ -// Structures used by ASIC_Init.ctb 
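/*
 * The clock-parameter blocks in this region share one convention: a
 * 32-bit word whose low 24 bits carry the target clock in units of
 * 10 kHz (ATOM_COMPUTE_CLOCK_FREQ, SET_CLOCK_FREQ_MASK = 0x00FFFFFF)
 * and whose high 8 bits carry a flag such as COMPUTE_ENGINE_PLL_PARAM.
 * A minimal sketch of packing a 600 MHz request that way, using the
 * macros defined above:
 *
 *	// 600 MHz = 600000 kHz = 60000 units of 10 kHz
 *	ULONG req = (60000 & SET_CLOCK_FREQ_MASK) |
 *		    ((ULONG)COMPUTE_ENGINE_PLL_PARAM << 24);
 *
 * The 24-bit field tops out at 0xFFFFFF * 10 kHz (roughly 167 GHz),
 * which is why the mask and the bitfield width agree.
 */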
-/****************************************************************************/ -typedef struct _ASIC_INIT_PARAMETERS -{ - ULONG ulDefaultEngineClock; //In 10Khz unit - ULONG ulDefaultMemoryClock; //In 10Khz unit -}ASIC_INIT_PARAMETERS; - -typedef struct _ASIC_INIT_PS_ALLOCATION -{ - ASIC_INIT_PARAMETERS sASICInitClocks; - SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure -}ASIC_INIT_PS_ALLOCATION; - -typedef struct _ASIC_INIT_CLOCK_PARAMETERS -{ - ULONG ulClkFreqIn10Khz:24; - ULONG ucClkFlag:8; -}ASIC_INIT_CLOCK_PARAMETERS; - -typedef struct _ASIC_INIT_PARAMETERS_V1_2 -{ - ASIC_INIT_CLOCK_PARAMETERS asSclkClock; //In 10Khz unit - ASIC_INIT_CLOCK_PARAMETERS asMemClock; //In 10Khz unit -}ASIC_INIT_PARAMETERS_V1_2; - -typedef struct _ASIC_INIT_PS_ALLOCATION_V1_2 -{ - ASIC_INIT_PARAMETERS_V1_2 sASICInitClocks; - ULONG ulReserved[8]; -}ASIC_INIT_PS_ALLOCATION_V1_2; - -/****************************************************************************/ -// Structure used by DynamicClockGatingTable.ctb -/****************************************************************************/ -typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS -{ - UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE - UCHAR ucPadding[3]; -}DYNAMIC_CLOCK_GATING_PARAMETERS; -#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS - -/****************************************************************************/ -// Structure used by EnableDispPowerGatingTable.ctb -/****************************************************************************/ -typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 -{ - UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ... - UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE - UCHAR ucPadding[2]; -}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1; - -typedef struct _ENABLE_DISP_POWER_GATING_PS_ALLOCATION -{ - UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ... 
- UCHAR ucEnable; // ATOM_ENABLE/ATOM_DISABLE/ATOM_INIT - UCHAR ucPadding[2]; - ULONG ulReserved[4]; -}ENABLE_DISP_POWER_GATING_PS_ALLOCATION; - -/****************************************************************************/ -// Structure used by EnableASIC_StaticPwrMgtTable.ctb -/****************************************************************************/ -typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS -{ - UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE - UCHAR ucPadding[3]; -}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; -#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS - -/****************************************************************************/ -// Structures used by DAC_LoadDetectionTable.ctb -/****************************************************************************/ -typedef struct _DAC_LOAD_DETECTION_PARAMETERS -{ - USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} - UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} - UCHAR ucMisc; //Valid only when table revision =1.3 and above -}DAC_LOAD_DETECTION_PARAMETERS; - -// DAC_LOAD_DETECTION_PARAMETERS.ucMisc -#define DAC_LOAD_MISC_YPrPb 0x01 - -typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION -{ - DAC_LOAD_DETECTION_PARAMETERS sDacload; - ULONG Reserved[2];// Don't set this one, allocation for EXT DAC -}DAC_LOAD_DETECTION_PS_ALLOCATION; - -/****************************************************************************/ -// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb -/****************************************************************************/ -typedef struct _DAC_ENCODER_CONTROL_PARAMETERS -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) - UCHAR ucAction; // 0: turn off encoder - // 1: setup and turn on encoder - // 7: ATOM_ENCODER_INIT Initialize DAC -}DAC_ENCODER_CONTROL_PARAMETERS; - -#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS - -/****************************************************************************/ -// Structures used by DIG1EncoderControlTable -// DIG2EncoderControlTable -// ExternalEncoderControlTable -/****************************************************************************/ -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - UCHAR ucConfig; - // [2] Link Select: - // =0: PHY linkA if bfLane<3 - // =1: PHY linkB if bfLanes<3 - // =0: PHY linkA+B if bfLanes=3 - // [3] Transmitter Sel - // =0: UNIPHY or PCIEPHY - // =1: LVTMA - UCHAR ucAction; // =0: turn off encoder - // =1: turn on encoder - UCHAR ucEncoderMode; - // =0: DP encoder - // =1: LVDS encoder - // =2: DVI encoder - // =3: HDMI encoder - // =4: SDVO encoder - UCHAR ucLaneNum; // how many lanes to enable - UCHAR ucReserved[2]; -}DIG_ENCODER_CONTROL_PARAMETERS; -#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS -#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS - -//ucConfig -#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 -#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 -#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 -#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02 -#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 -#define ATOM_ENCODER_CONFIG_LINKA 0x00 -#define ATOM_ENCODER_CONFIG_LINKB 0x04 -#define ATOM_ENCODER_CONFIG_LINKA_B 
ATOM_TRANSMITTER_CONFIG_LINKA -#define ATOM_ENCODER_CONFIG_LINKB_A ATOM_ENCODER_CONFIG_LINKB -#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK 0x08 -#define ATOM_ENCODER_CONFIG_UNIPHY 0x00 -#define ATOM_ENCODER_CONFIG_LVTMA 0x08 -#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 -#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 -#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0 -// ucAction -// ATOM_ENABLE: Enable Encoder -// ATOM_DISABLE: Disable Encoder - -//ucEncoderMode -#define ATOM_ENCODER_MODE_DP 0 -#define ATOM_ENCODER_MODE_LVDS 1 -#define ATOM_ENCODER_MODE_DVI 2 -#define ATOM_ENCODER_MODE_HDMI 3 -#define ATOM_ENCODER_MODE_SDVO 4 -#define ATOM_ENCODER_MODE_DP_AUDIO 5 -#define ATOM_ENCODER_MODE_TV 13 -#define ATOM_ENCODER_MODE_CV 14 -#define ATOM_ENCODER_MODE_CRT 15 -#define ATOM_ENCODER_MODE_DVO 16 -#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2 -#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2 - - -typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucReserved1:2; - UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF - UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F - UCHAR ucReserved:1; - UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz -#else - UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz - UCHAR ucReserved:1; - UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F - UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF - UCHAR ucReserved1:2; -#endif -}ATOM_DIG_ENCODER_CONFIG_V2; - - -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - ATOM_DIG_ENCODER_CONFIG_V2 acConfig; - UCHAR ucAction; - UCHAR ucEncoderMode; - // =0: DP encoder - // =1: LVDS encoder - // =2: DVI encoder - // =3: HDMI encoder - // =4: SDVO encoder - UCHAR ucLaneNum; // how many lanes to enable - UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS - UCHAR ucReserved; -}DIG_ENCODER_CONTROL_PARAMETERS_V2; - -//ucConfig -#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 -#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 -#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 -#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK 0x04 -#define ATOM_ENCODER_CONFIG_V2_LINKA 0x00 -#define ATOM_ENCODER_CONFIG_V2_LINKB 0x04 -#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK 0x18 -#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1 0x00 -#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 -#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 - -// ucAction: -// ATOM_DISABLE -// ATOM_ENABLE -#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 -#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 -#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a -#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13 -#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b -#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c -#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d -#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e -#define ATOM_ENCODER_CMD_SETUP 0x0f -#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10 - -// ucStatus -#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 -#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 - -//ucTableFormatRevision=1 -//ucTableContentRevision=3 -// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver -typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 -{ -#if 
ATOM_BIG_ENDIAN - UCHAR ucReserved1:1; - UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) - UCHAR ucReserved:3; - UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz -#else - UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz - UCHAR ucReserved:3; - UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) - UCHAR ucReserved1:1; -#endif -}ATOM_DIG_ENCODER_CONFIG_V3; - -#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 -#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 -#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 -#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 -#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00 -#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10 -#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20 -#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30 -#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40 -#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50 - -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - ATOM_DIG_ENCODER_CONFIG_V3 acConfig; - UCHAR ucAction; - union{ - UCHAR ucEncoderMode; - // =0: DP encoder - // =1: LVDS encoder - // =2: DVI encoder - // =3: HDMI encoder - // =4: SDVO encoder - // =5: DP audio - UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE - // =0: external DP - // =0x1: internal DP2 - // =0x11: internal DP1 for NutMeg/Travis DP translator - }; - UCHAR ucLaneNum; // how many lanes to enable - UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP - UCHAR ucReserved; -}DIG_ENCODER_CONTROL_PARAMETERS_V3; - -//ucTableFormatRevision=1 -//ucTableContentRevision=4 -// start from NI -// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver -typedef struct _ATOM_DIG_ENCODER_CONFIG_V4 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucReserved1:1; - UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) - UCHAR ucReserved:2; - UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version -#else - UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version - UCHAR ucReserved:2; - UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F) - UCHAR ucReserved1:1; -#endif -}ATOM_DIG_ENCODER_CONFIG_V4; - -#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03 -#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00 -#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01 -#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02 -#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03 -#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70 -#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00 -#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10 -#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20 -#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30 -#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40 -#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50 -#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60 - -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4 -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - union{ - ATOM_DIG_ENCODER_CONFIG_V4 acConfig; - UCHAR ucConfig; - }; - UCHAR ucAction; - union{ - UCHAR ucEncoderMode; - // =0: DP encoder - // =1: LVDS encoder - // =2: DVI encoder - // =3: HDMI encoder - // =4: SDVO encoder - // =5: DP audio - UCHAR 
ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE - // =0: external DP - // =0x1: internal DP2 - // =0x11: internal DP1 for NutMeg/Travis DP translator - }; - UCHAR ucLaneNum; // how many lanes to enable - UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP - UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version -}DIG_ENCODER_CONTROL_PARAMETERS_V4; - -// define ucBitPerColor: -#define PANEL_BPC_UNDEFINE 0x00 -#define PANEL_6BIT_PER_COLOR 0x01 -#define PANEL_8BIT_PER_COLOR 0x02 -#define PANEL_10BIT_PER_COLOR 0x03 -#define PANEL_12BIT_PER_COLOR 0x04 -#define PANEL_16BIT_PER_COLOR 0x05 - -//define ucPanelMode -#define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00 -#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01 -#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11 - -/****************************************************************************/ -// Structures used by UNIPHYTransmitterControlTable -// LVTMATransmitterControlTable -// DVOOutputControlTable -/****************************************************************************/ -typedef struct _ATOM_DP_VS_MODE -{ - UCHAR ucLaneSel; - UCHAR ucLaneSet; -}ATOM_DP_VS_MODE; - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS -{ - union - { - USHORT usPixelClock; // in 10KHz; for bios convenient - USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h - ATOM_DP_VS_MODE asMode; // DP Voltage swing mode - }; - UCHAR ucConfig; - // [0]=0: 4 lane Link, - // =1: 8 lane Link ( Dual Links TMDS ) - // [1]=0: InCoherent mode - // =1: Coherent Mode - // [2] Link Select: - // =0: PHY linkA if bfLane<3 - // =1: PHY linkB if bfLanes<3 - // =0: PHY linkA+B if bfLanes=3 - // [5:4]PCIE lane Sel - // =0: lane 0~3 or 0~7 - // =1: lane 4~7 - // =2: lane 8~11 or 8~15 - // =3: lane 12~15 - UCHAR ucAction; // =0: turn off encoder - // =1: turn on encoder - UCHAR ucReserved[4]; -}DIG_TRANSMITTER_CONTROL_PARAMETERS; - -#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS - -//ucInitInfo -#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff - -//ucConfig -#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 -#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 -#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 -#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 -#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 -#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 -#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 - -#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE -#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE -#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE - -#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 -#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 -#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE 0x20 -#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN 0x30 -#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK 0xc0 -#define ATOM_TRANSMITTER_CONFIG_LANE_0_3 0x00 -#define ATOM_TRANSMITTER_CONFIG_LANE_0_7 0x00 -#define ATOM_TRANSMITTER_CONFIG_LANE_4_7 0x40 -#define ATOM_TRANSMITTER_CONFIG_LANE_8_11 0x80 -#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 -#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 - -//ucAction -#define ATOM_TRANSMITTER_ACTION_DISABLE 0 -#define ATOM_TRANSMITTER_ACTION_ENABLE 1 -#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 -#define ATOM_TRANSMITTER_ACTION_LCD_BLON 3 -#define 
ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4 -#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START 5 -#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP 6 -#define ATOM_TRANSMITTER_ACTION_INIT 7 -#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT 8 -#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 -#define ATOM_TRANSMITTER_ACTION_SETUP 10 -#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 -#define ATOM_TRANSMITTER_ACTION_POWER_ON 12 -#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13 - -// Following are used for DigTransmitterControlTable ver1.2 -typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) - UCHAR ucReserved:1; - UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F - - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector -#else - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). 
=1: Data/clk path source from DIGB ( DIG inst1 ) - UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector - UCHAR ucReserved:1; - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) -#endif -}ATOM_DIG_TRANSMITTER_CONFIG_V2; - -//ucConfig -//Bit0 -#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 - -//Bit1 -#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 - -//Bit2 -#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 -#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 -#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 - -// Bit3 -#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 -#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP -#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP - -// Bit4 -#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 - -// Bit7:6 -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 -{ - union - { - USHORT usPixelClock; // in 10KHz; for bios convenient - USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h - ATOM_DP_VS_MODE asMode; // DP Voltage swing mode - }; - ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; - UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX - UCHAR ucReserved[4]; -}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; - -typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) - UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector -#else - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. 
=1: Data/clk path source from DIGB/D/F - UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) -#endif -}ATOM_DIG_TRANSMITTER_CONFIG_V3; - - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 -{ - union - { - USHORT usPixelClock; // in 10KHz; for bios convenient - USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h - ATOM_DP_VS_MODE asMode; // DP Voltage swing mode - }; - ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig; - UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX - UCHAR ucLaneNum; - UCHAR ucReserved[3]; -}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3; - -//ucConfig -//Bit0 -#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01 - -//Bit1 -#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02 - -//Bit2 -#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04 -#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00 -#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04 - -// Bit3 -#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08 -#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00 -#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08 - -// Bit5:4 -#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30 -#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00 -#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10 -#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20 - -// Bit7:6 -#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0 -#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB -#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD -#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF - - -/****************************************************************************/ -// Structures used by UNIPHYTransmitterControlTable V1.4 -// ASIC Families: NI -// ucTableFormatRevision=1 -// ucTableContentRevision=4 -/****************************************************************************/ -typedef struct _ATOM_DP_VS_MODE_V4 -{ - UCHAR ucLaneSel; - union - { - UCHAR ucLaneSet; - struct { -#if ATOM_BIG_ENDIAN - UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 - UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level - UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level -#else - UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level - UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level - UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 -#endif - }; - }; -}ATOM_DP_VS_MODE_V4; - -typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) - UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector -#else - UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector - UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) - UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E - // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F - UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F - UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New - UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) - // =1 Dig Transmitter 2 ( Uniphy CD ) - // =2 Dig Transmitter 3 ( Uniphy EF ) -#endif -}ATOM_DIG_TRANSMITTER_CONFIG_V4; - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 -{ - union - { - USHORT usPixelClock; // in 10KHz; for bios convenient - USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h - ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version - }; - union - { - ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig; - UCHAR ucConfig; - }; - UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX - UCHAR ucLaneNum; - UCHAR ucReserved[3]; -}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4; - -//ucConfig -//Bit0 -#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01 -//Bit1 -#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02 -//Bit2 -#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04 -#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00 -#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04 -// Bit3 -#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08 -#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00 -#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08 -// Bit5:4 -#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30 -#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00 -#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10 -#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4 -#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3 -// Bit7:6 -#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0 -#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB -#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD -#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF - - -typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5 -{ -#if ATOM_BIG_ENDIAN - UCHAR ucReservd1:1; - UCHAR ucHPDSel:3; - UCHAR ucPhyClkSrcId:2; - UCHAR ucCoherentMode:1; - UCHAR ucReserved:1; -#else - UCHAR ucReserved:1; - UCHAR ucCoherentMode:1; - UCHAR ucPhyClkSrcId:2; - UCHAR ucHPDSel:3; - UCHAR ucReservd1:1; -#endif -}ATOM_DIG_TRANSMITTER_CONFIG_V5; - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 -{ - USHORT usSymClock; // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock, (HDMI deep color), =pixel clock * deep_color_ratio - UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF - UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx - UCHAR ucLaneNum; // indicate lane number 1-8 - UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h - UCHAR ucDigMode; // indicate DIG mode - union{ - ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig; - UCHAR ucConfig; - }; - 
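/* A minimal sketch (not part of this header or patch) of how a caller
 * might pack the ucConfig byte of DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
 * from the ATOM_TRANSMITTER_CONFIG_V4_* masks above; the helper name
 * and parameters are hypothetical. */
static UCHAR build_transmitter_config_v4(int dual_link, int coherent,
                                         int use_linkb, int use_dig2,
                                         UCHAR refclk_sel)
{
    UCHAR cfg = 0;

    if (dual_link)
        cfg |= ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR;
    if (coherent)
        cfg |= ATOM_TRANSMITTER_CONFIG_V4_COHERENT;
    cfg |= use_linkb ? ATOM_TRANSMITTER_CONFIG_V4_LINKB
                     : ATOM_TRANSMITTER_CONFIG_V4_LINKA;
    cfg |= use_dig2 ? ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER
                    : ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER;
    /* refclk_sel is one of _P1PLL/_P2PLL/_DCPLL/_REFCLK_SRC_EXT. */
    cfg |= refclk_sel & ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK;
    return cfg;
}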
UCHAR ucDigEncoderSel; // indicate DIG front end encoder - UCHAR ucDPLaneSet; - UCHAR ucReserved; - UCHAR ucReserved1; -}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5; - -//ucPhyId -#define ATOM_PHY_ID_UNIPHYA 0 -#define ATOM_PHY_ID_UNIPHYB 1 -#define ATOM_PHY_ID_UNIPHYC 2 -#define ATOM_PHY_ID_UNIPHYD 3 -#define ATOM_PHY_ID_UNIPHYE 4 -#define ATOM_PHY_ID_UNIPHYF 5 -#define ATOM_PHY_ID_UNIPHYG 6 - -// ucDigEncoderSel -#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01 -#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02 -#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04 -#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08 -#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10 -#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20 -#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40 - -// ucDigMode -#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0 -#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1 -#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2 -#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3 -#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4 -#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5 - -// ucDPLaneSet -#define DP_LANE_SET__0DB_0_4V 0x00 -#define DP_LANE_SET__0DB_0_6V 0x01 -#define DP_LANE_SET__0DB_0_8V 0x02 -#define DP_LANE_SET__0DB_1_2V 0x03 -#define DP_LANE_SET__3_5DB_0_4V 0x08 -#define DP_LANE_SET__3_5DB_0_6V 0x09 -#define DP_LANE_SET__3_5DB_0_8V 0x0a -#define DP_LANE_SET__6DB_0_4V 0x10 -#define DP_LANE_SET__6DB_0_6V 0x11 -#define DP_LANE_SET__9_5DB_0_4V 0x18 - -// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig; -// Bit1 -#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02 - -// Bit3:2 -#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c -#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02 - -#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00 -#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04 -#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08 -#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c -// Bit6:4 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04 - -#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50 -#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60 - -#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 - - -/****************************************************************************/ -// Structures used by ExternalEncoderControlTable V1.3 -// ASIC Families: Evergreen, Llano, NI -// ucTableFormatRevision=1 -// ucTableContentRevision=3 -/****************************************************************************/ - -typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 -{ - union{ - USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT - USHORT usConnectorId; // connector id, valid when ucAction = INIT - }; - UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT - UCHAR ucAction; // - UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT - UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT - UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP - UCHAR ucReserved; -}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; - -// ucAction -#define EXTERANL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00 -#define EXTERANL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01 -#define 
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT          0x07
-#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP         0x0f
-#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF  0x10
-#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING      0x11
-#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION     0x12
-#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP             0x14
-
-// ucConfig
-#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK       0x03
-#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ    0x00
-#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ    0x01
-#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ    0x02
-#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MAKS      0x70
-#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1              0x00
-#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2              0x10
-#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3              0x20
-
-typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
-{
-  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
-  ULONG ulReserved[2];
-}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
-
-
-/****************************************************************************/
-// Structures used by DAC1OuputControlTable
-//                    DAC2OuputControlTable
-//                    LVTMAOutputControlTable  (Before DEC30)
-//                    TMDSAOutputControlTable  (Before DEC30)
-/****************************************************************************/
-typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-{
-  UCHAR  ucAction;               // Possible input: ATOM_ENABLE||ATOM_DISABLE
-                                 // When the display is LCD, in addition to above:
-                                 // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
-                                 // ATOM_LCD_SELFTEST_STOP
-
-  UCHAR  aucPadding[3];          // padding to DWORD aligned
-}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
-
-#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-
-
-#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-
-#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
-#define DVO_OUTPUT_CONTROL_PARAMETERS_V3   DIG_TRANSMITTER_CONTROL_PARAMETERS
-
-
-typedef struct _LVTMA_OUTPUT_CONTROL_PARAMETERS_V2
-{
-  // Possible value of ucAction
-  // ATOM_TRANSMITTER_ACTION_LCD_BLON
-  // ATOM_TRANSMITTER_ACTION_LCD_BLOFF
-  // ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL
-  // ATOM_TRANSMITTER_ACTION_POWER_ON
-  // ATOM_TRANSMITTER_ACTION_POWER_OFF
-  UCHAR ucAction;
-  UCHAR ucBriLevel;
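/* Hedged sketch: mapping a DP link clock (in 10 kHz units, as elsewhere
 * in this header) onto the DPLINKRATE bits of ucConfig for
 * EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; the helper name and the
 * threshold style are illustrative assumptions, not part of the patch. */
static UCHAR dp_link_rate_to_config_v3(ULONG link_clock_10khz)
{
    if (link_clock_10khz >= 540000)     /* 5.40 GHz (HBR2) */
        return EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
    if (link_clock_10khz >= 270000)     /* 2.70 GHz (HBR) */
        return EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
    return EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ;  /* RBR */
}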
-  USHORT usPwmFreq;                // in unit of Hz, 200 means 200Hz
-}LVTMA_OUTPUT_CONTROL_PARAMETERS_V2;
-
-
-
-/****************************************************************************/
-//  Structures used by BlankCRTCTable
-/****************************************************************************/
-typedef struct _BLANK_CRTC_PARAMETERS
-{
-  UCHAR  ucCRTC;                  // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR  ucBlanking;              // ATOM_BLANKING or ATOM_BLANKINGOFF
-  USHORT usBlackColorRCr;
-  USHORT usBlackColorGY;
-  USHORT usBlackColorBCb;
-}BLANK_CRTC_PARAMETERS;
-#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
-
-/****************************************************************************/
-//  Structures used by EnableCRTCTable
-//                     EnableCRTCMemReqTable
-//                     UpdateCRTC_DoubleBufferRegistersTable
-/****************************************************************************/
-typedef struct _ENABLE_CRTC_PARAMETERS
-{
-  UCHAR ucCRTC;                   // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR ucEnable;                 // ATOM_ENABLE or ATOM_DISABLE
-  UCHAR ucPadding[2];
-}ENABLE_CRTC_PARAMETERS;
-#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
-
-/****************************************************************************/
-//  Structures used by SetCRTC_OverScanTable
-/****************************************************************************/
-typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
-{
-  USHORT usOverscanRight;         // right
-  USHORT usOverscanLeft;          // left
-  USHORT usOverscanBottom;        // bottom
-  USHORT usOverscanTop;           // top
-  UCHAR  ucCRTC;                  // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR  ucPadding[3];
-}SET_CRTC_OVERSCAN_PARAMETERS;
-#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
-
-/****************************************************************************/
-//  Structures used by SetCRTC_ReplicationTable
-/****************************************************************************/
-typedef struct _SET_CRTC_REPLICATION_PARAMETERS
-{
-  UCHAR ucH_Replication;          // horizontal replication
-  UCHAR ucV_Replication;          // vertical replication
-  UCHAR usCRTC;                   // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR ucPadding;
-}SET_CRTC_REPLICATION_PARAMETERS;
-#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
-
-/****************************************************************************/
-//  Structures used by SelectCRTC_SourceTable
-/****************************************************************************/
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
-{
-  UCHAR ucCRTC;                   // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR ucDevice;                 // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
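/* Illustrative only: preparing a uniform 16-pixel overscan border on
 * CRTC1 for the SetCRTC_OverScanTable command defined above.
 * ATOM_CRTC1 comes from earlier in this header; the table-execution
 * call is driver glue and is omitted, and the helper name is made up. */
static void set_16px_overscan_crtc1(SET_CRTC_OVERSCAN_PARAMETERS *overscan)
{
    overscan->usOverscanRight  = 16;
    overscan->usOverscanLeft   = 16;
    overscan->usOverscanBottom = 16;
    overscan->usOverscanTop    = 16;
    overscan->ucCRTC           = ATOM_CRTC1;
    /* ucPadding[] is assumed to be zeroed by the caller. */
}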
- UCHAR ucPadding[2]; -}SELECT_CRTC_SOURCE_PARAMETERS; -#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS - -typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 -{ - UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 - UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO - UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO - UCHAR ucPadding; -}SELECT_CRTC_SOURCE_PARAMETERS_V2; - -//ucEncoderID -//#define ASIC_INT_DAC1_ENCODER_ID 0x00 -//#define ASIC_INT_TV_ENCODER_ID 0x02 -//#define ASIC_INT_DIG1_ENCODER_ID 0x03 -//#define ASIC_INT_DAC2_ENCODER_ID 0x04 -//#define ASIC_EXT_TV_ENCODER_ID 0x06 -//#define ASIC_INT_DVO_ENCODER_ID 0x07 -//#define ASIC_INT_DIG2_ENCODER_ID 0x09 -//#define ASIC_EXT_DIG_ENCODER_ID 0x05 - -//ucEncodeMode -//#define ATOM_ENCODER_MODE_DP 0 -//#define ATOM_ENCODER_MODE_LVDS 1 -//#define ATOM_ENCODER_MODE_DVI 2 -//#define ATOM_ENCODER_MODE_HDMI 3 -//#define ATOM_ENCODER_MODE_SDVO 4 -//#define ATOM_ENCODER_MODE_TV 13 -//#define ATOM_ENCODER_MODE_CV 14 -//#define ATOM_ENCODER_MODE_CRT 15 - - -typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V3 -{ - UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 - UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO - UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO - UCHAR ucDstBpc; // PANEL_6/8/10/12BIT_PER_COLOR -}SELECT_CRTC_SOURCE_PARAMETERS_V3; - - -/****************************************************************************/ -// Structures used by SetPixelClockTable -// GetPixelClockTable -/****************************************************************************/ -//Major revision=1., Minor revision=1 -typedef struct _PIXEL_CLOCK_PARAMETERS -{ - USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) - // 0 means disable PPLL - USHORT usRefDiv; // Reference divider - USHORT usFbDiv; // feedback divider - UCHAR ucPostDiv; // post divider - UCHAR ucFracFbDiv; // fractional feedback divider - UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 - UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER - UCHAR ucCRTC; // Which CRTC uses this Ppll - UCHAR ucPadding; -}PIXEL_CLOCK_PARAMETERS; - -//Major revision=1., Minor revision=2, add ucMiscIfno -//ucMiscInfo: -#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 -#define MISC_DEVICE_INDEX_MASK 0xF0 -#define MISC_DEVICE_INDEX_SHIFT 4 - -typedef struct _PIXEL_CLOCK_PARAMETERS_V2 -{ - USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) - // 0 means disable PPLL - USHORT usRefDiv; // Reference divider - USHORT usFbDiv; // feedback divider - UCHAR ucPostDiv; // post divider - UCHAR ucFracFbDiv; // fractional feedback divider - UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 - UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER - UCHAR ucCRTC; // Which CRTC uses this Ppll - UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog -}PIXEL_CLOCK_PARAMETERS_V2; - -//Major revision=1., Minor revision=3, structure/definition change -//ucEncoderMode: -//ATOM_ENCODER_MODE_DP -//ATOM_ENOCDER_MODE_LVDS -//ATOM_ENOCDER_MODE_DVI -//ATOM_ENOCDER_MODE_HDMI -//ATOM_ENOCDER_MODE_SDVO -//ATOM_ENCODER_MODE_TV 13 -//ATOM_ENCODER_MODE_CV 14 -//ATOM_ENCODER_MODE_CRT 15 - -//ucDVOConfig -//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 -//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 -//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 -//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c -//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 -//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 -//#define 
DVO_ENCODER_CONFIG_24BIT 0x08 - -//ucMiscInfo: also changed, see below -#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 -#define PIXEL_CLOCK_MISC_VGA_MODE 0x02 -#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 -#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 -#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 -#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 -#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10 -// V1.4 for RoadRunner -#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 -#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 - - -typedef struct _PIXEL_CLOCK_PARAMETERS_V3 -{ - USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) - // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. - USHORT usRefDiv; // Reference divider - USHORT usFbDiv; // feedback divider - UCHAR ucPostDiv; // post divider - UCHAR ucFracFbDiv; // fractional feedback divider - UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 - UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h - union - { - UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ - UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit - }; - UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel - // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source - // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider -}PIXEL_CLOCK_PARAMETERS_V3; - -#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 -#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST - - -typedef struct _PIXEL_CLOCK_PARAMETERS_V5 -{ - UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to - // drive the pixel clock. not used for DCPLL case. - union{ - UCHAR ucReserved; - UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed. - }; - USHORT usPixelClock; // target the pixel clock to drive the CRTC timing - // 0 means disable PPLL/DCPLL. - USHORT usFbDiv; // feedback divider integer part. - UCHAR ucPostDiv; // post divider. - UCHAR ucRefDiv; // Reference divider - UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL - UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, - // indicate which graphic encoder will be used. - UCHAR ucEncoderMode; // Encoder mode: - UCHAR ucMiscInfo; // bit[0]= Force program PPLL - // bit[1]= when VGA timing is used. - // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp - // bit[4]= RefClock source for PPLL. - // =0: XTLAIN( default mode ) - // =1: other external clock source, which is pre-defined - // by VBIOS depend on the feature required. - // bit[7:5]: reserved. - ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) - -}PIXEL_CLOCK_PARAMETERS_V5; - -#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01 -#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02 -#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c -#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00 -#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04 -#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 -#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 - -typedef struct _CRTC_PIXEL_CLOCK_FREQ -{ -#if ATOM_BIG_ENDIAN - ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to - // drive the pixel clock. not used for DCPLL case. - ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. - // 0 means disable PPLL/DCPLL. 
Expanded to 24 bits comparing to previous version. -#else - ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. - // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. - ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to - // drive the pixel clock. not used for DCPLL case. -#endif -}CRTC_PIXEL_CLOCK_FREQ; - -typedef struct _PIXEL_CLOCK_PARAMETERS_V6 -{ - union{ - CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency - ULONG ulDispEngClkFreq; // dispclk frequency - }; - USHORT usFbDiv; // feedback divider integer part. - UCHAR ucPostDiv; // post divider. - UCHAR ucRefDiv; // Reference divider - UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL - UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, - // indicate which graphic encoder will be used. - UCHAR ucEncoderMode; // Encoder mode: - UCHAR ucMiscInfo; // bit[0]= Force program PPLL - // bit[1]= when VGA timing is used. - // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp - // bit[4]= RefClock source for PPLL. - // =0: XTLAIN( default mode ) - // =1: other external clock source, which is pre-defined - // by VBIOS depend on the feature required. - // bit[7:5]: reserved. - ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) - -}PIXEL_CLOCK_PARAMETERS_V6; - -#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01 -#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02 -#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c -#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 -#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 -#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1) -#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 -#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4) -#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c -#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 -#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40 -#define PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS 0x40 - -typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 -{ - PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; -}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2; - -typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2 -{ - UCHAR ucStatus; - UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock - UCHAR ucReserved[2]; -}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2; - -typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3 -{ - PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput; -}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3; - - -/****************************************************************************/ -// Structures used by AdjustDisplayPllTable -/****************************************************************************/ -typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS -{ - USHORT usPixelClock; - UCHAR ucTransmitterID; - UCHAR ucEncodeMode; - union - { - UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit - UCHAR ucConfig; //if none DVO, not defined yet - }; - UCHAR ucReserved[3]; -}ADJUST_DISPLAY_PLL_PARAMETERS; - -#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 -#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS - -typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 -{ - USHORT usPixelClock; // target pixel clock - UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h - UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI - UCHAR 
ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX - UCHAR ucExtTransmitterID; // external encoder id. - UCHAR ucReserved[2]; -}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; - -// usDispPllConfig v1.2 for RoadRunner -#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO -#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS -#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI -#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS - - -typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 -{ - ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc - UCHAR ucRefDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider and post_div ( if it is not given ) - UCHAR ucPostDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider - UCHAR ucReserved[2]; -}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3; - -typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 -{ - union - { - ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput; - ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput; - }; -} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3; - -/****************************************************************************/ -// Structures used by EnableYUVTable -/****************************************************************************/ -typedef struct _ENABLE_YUV_PARAMETERS -{ - UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) - UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format - UCHAR ucPadding[2]; -}ENABLE_YUV_PARAMETERS; -#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS - -/****************************************************************************/ -// Structures used by GetMemoryClockTable -/****************************************************************************/ -typedef struct _GET_MEMORY_CLOCK_PARAMETERS -{ - ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit -} GET_MEMORY_CLOCK_PARAMETERS; -#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS - -/****************************************************************************/ -// Structures used by GetEngineClockTable -/****************************************************************************/ -typedef struct _GET_ENGINE_CLOCK_PARAMETERS -{ - ULONG ulReturnEngineClock; // current engine speed in 10KHz unit -} GET_ENGINE_CLOCK_PARAMETERS; -#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS - -/****************************************************************************/ -// Following Structures and constant may be obsolete -/****************************************************************************/ -//Maxium 8 bytes,the data read in will be placed in the parameter space. 
-//Read operation successful when the parameter space is non-zero, otherwise read operation failed
-typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
-{
-  USHORT usPrescale;        //Ratio between Engine clock and I2C clock
-  USHORT usVRAMAddress;     //Address in Frame Buffer where to place raw EDID
-  USHORT usStatus;          //When used as output: lower byte EDID checksum, high byte hardware status
-                            //When used as input:  lower byte as 'byte to read': currently limited to 128byte or 1byte
-  UCHAR  ucSlaveAddr;       //Read from which slave
-  UCHAR  ucLineNumber;      //Read from which HW assisted line
-}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
-#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
-
-
-#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
-#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
-#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
-#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
-#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
-
-typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-{
-  USHORT usPrescale;        //Ratio between Engine clock and I2C clock
-  USHORT usByteOffset;      //Write to which byte
-                            //Upper portion of usByteOffset is Format of data
-                            //1bytePS+offsetPS
-                            //2bytesPS+offsetPS
-                            //blockID+offsetPS
-                            //blockID+offsetID
-                            //blockID+counterID+offsetID
-  UCHAR  ucData;            //PS data1
-  UCHAR  ucStatus;          //Status byte 1=success, 2=failure, also used as PS data2
-  UCHAR  ucSlaveAddr;       //Write to which slave
-  UCHAR  ucLineNumber;      //Write from which HW assisted line
-}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
-
-#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-
-typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
-{
-  USHORT usPrescale;        //Ratio between Engine clock and I2C clock
-  UCHAR  ucSlaveAddr;       //Write to which slave
-  UCHAR  ucLineNumber;      //Write from which HW assisted line
-}SET_UP_HW_I2C_DATA_PARAMETERS;
-
-/**************************************************************************/
-#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-
-
-/****************************************************************************/
-//  Structures used by PowerConnectorDetectionTable
-/****************************************************************************/
-typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
-{
-  UCHAR  ucPowerConnectorStatus;   //Used for return value 0: detected, 1: not detected
-  UCHAR  ucPwrBehaviorId;
-  USHORT usPwrBudget;              //how much power currently boot to in unit of watt
-}POWER_CONNECTOR_DETECTION_PARAMETERS;
-
-typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
-{
-  UCHAR  ucPowerConnectorStatus;   //Used for return value 0: detected, 1: not detected
-  UCHAR  ucReserved;
-  USHORT usPwrBudget;              //how much power currently boot to in unit of watt
-  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
-
-
-/****************************LVDS SS Command Table Definitions**********************/
-
-/****************************************************************************/
-//  Structures used by EnableSpreadSpectrumOnPPLLTable
-/****************************************************************************/
-typedef struct _ENABLE_LVDS_SS_PARAMETERS
-{
-  USHORT usSpreadSpectrumPercentage;
-  UCHAR  ucSpreadSpectrumType;           //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
-  UCHAR  ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
-  UCHAR  ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
-  UCHAR  ucPadding[3];
-}ENABLE_LVDS_SS_PARAMETERS;
-
-//ucTableFormatRevision=1,ucTableContentRevision=2
-typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
-{
-  USHORT usSpreadSpectrumPercentage;
-  UCHAR  ucSpreadSpectrumType;    //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
-  UCHAR  ucSpreadSpectrumStep;    //
-  UCHAR  ucEnable;                //ATOM_ENABLE or ATOM_DISABLE
-  UCHAR  ucSpreadSpectrumDelay;
-  UCHAR  ucSpreadSpectrumRange;
-  UCHAR  ucPadding;
-}ENABLE_LVDS_SS_PARAMETERS_V2;
-
-//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
-typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
-{
-  USHORT usSpreadSpectrumPercentage;
-  UCHAR  ucSpreadSpectrumType;    // Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
-  UCHAR  ucSpreadSpectrumStep;    //
-  UCHAR  ucEnable;                // ATOM_ENABLE or ATOM_DISABLE
-  UCHAR  ucSpreadSpectrumDelay;
-  UCHAR  ucSpreadSpectrumRange;
-  UCHAR  ucPpll;                  // ATOM_PPLL1/ATOM_PPLL2
-}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
-
- typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
-{
-  USHORT usSpreadSpectrumPercentage;
-  UCHAR  ucSpreadSpectrumType;    // Bit[0]: 0-Down Spread,1-Center Spread.
-                                  // Bit[1]: 1-Ext. 0-Int.
-                                  // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
-                                  // Bits[7:4] reserved
-  UCHAR  ucEnable;                // ATOM_ENABLE or ATOM_DISABLE
-  USHORT usSpreadSpectrumAmount;  // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
-  USHORT usSpreadSpectrumStep;    // SS_STEP_SIZE_DSFRAC
-}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
-
-#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
-#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
-#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
-#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
-#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
-#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
-#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
-#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
-#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
-#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
-#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
-
-// Used by DCE5.0
- typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
-{
-  USHORT usSpreadSpectrumAmountFrac;  // SS_AMOUNT_DSFRAC New in DCE5.0
-  UCHAR  ucSpreadSpectrumType;        // Bit[0]: 0-Down Spread,1-Center Spread.
-                                      // Bit[1]: 1-Ext. 0-Int.
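/* A minimal sketch (not from this header) of packing the 12-bit
 * spread-spectrum amount of ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 out of
 * its FBDIV and NFRAC_SLIP components, using the masks defined above;
 * the helper name is hypothetical. */
static USHORT pack_ss_amount_v2(USHORT ss_fbdiv, USHORT ss_nfrac_slip)
{
    return (USHORT)(((ss_fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
                     ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
                    ((ss_nfrac_slip << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
                     ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
}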
- // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL - // Bits[7:4] reserved - UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE - USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] - USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC -}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3; - - -#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00 -#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01 -#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02 -#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c -#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00 -#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04 -#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08 -#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL -#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF -#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0 -#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00 -#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8 - -#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL - -typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION -{ - PIXEL_CLOCK_PARAMETERS sPCLKInput; - ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion -}SET_PIXEL_CLOCK_PS_ALLOCATION; - - - -#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION - -/****************************************************************************/ -// Structures used by ### -/****************************************************************************/ -typedef struct _MEMORY_TRAINING_PARAMETERS -{ - ULONG ulTargetMemoryClock; //In 10Khz unit -}MEMORY_TRAINING_PARAMETERS; -#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS - - -typedef struct _MEMORY_TRAINING_PARAMETERS_V1_2 -{ - USHORT usMemTrainingMode; - USHORT usReserved; -}MEMORY_TRAINING_PARAMETERS_V1_2; - -//usMemTrainingMode -#define NORMAL_MEMORY_TRAINING_MODE 0 -#define ENTER_DRAM_SELFREFRESH_MODE 1 -#define EXIT_DRAM_SELFRESH_MODE 2 - -/****************************LVDS and other encoder command table definitions **********************/ - - -/****************************************************************************/ -// Structures used by LVDSEncoderControlTable (Before DEC30) -// LVTMAEncoderControlTable (Before DEC30) -// TMDSAEncoderControlTable (Before DEC30) -/****************************************************************************/ -typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - UCHAR ucMisc; // bit0=0: Enable single link - // =1: Enable dual link - // Bit1=0: 666RGB - // =1: 888RGB - UCHAR ucAction; // 0: turn off encoder - // 1: setup and turn on encoder -}LVDS_ENCODER_CONTROL_PARAMETERS; - -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS - -#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS -#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS - -#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS -#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS - -//ucTableFormatRevision=1,ucTableContentRevision=2 -typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 -{ - USHORT usPixelClock; // in 10KHz; for bios convenient - UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx defintions below - UCHAR ucAction; // 0: turn off encoder - // 1: setup and turn on encoder - UCHAR ucTruncate; // bit0=0: Disable truncate - // =1: Enable truncate - // bit4=0: 666RGB - // =1: 888RGB - UCHAR ucSpatial; // bit0=0: Disable spatial dithering - // =1: Enable spatial dithering - // bit4=0: 666RGB 
- // =1: 888RGB - UCHAR ucTemporal; // bit0=0: Disable temporal dithering - // =1: Enable temporal dithering - // bit4=0: 666RGB - // =1: 888RGB - // bit5=0: Gray level 2 - // =1: Gray level 4 - UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E - // =1: 25FRC_SEL pattern F - // bit6:5=0: 50FRC_SEL pattern A - // =1: 50FRC_SEL pattern B - // =2: 50FRC_SEL pattern C - // =3: 50FRC_SEL pattern D - // bit7=0: 75FRC_SEL pattern E - // =1: 75FRC_SEL pattern F -}LVDS_ENCODER_CONTROL_PARAMETERS_V2; - -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 - -#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 -#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 - -#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 -#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 - - -#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2 -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 - -#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 -#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3 - -#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 -#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3 - -/****************************************************************************/ -// Structures used by ### -/****************************************************************************/ -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS -{ - UCHAR ucEnable; // Enable or Disable External TMDS encoder - UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} - UCHAR ucPadding[2]; -}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; - -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION -{ - ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion -}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; - -#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 -{ - ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion -}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; - -typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION -{ - DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; -}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; - -/****************************************************************************/ -// Structures used by DVOEncoderControlTable -/****************************************************************************/ -//ucTableFormatRevision=1,ucTableContentRevision=3 -//ucDVOConfig: -#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 -#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 -#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 -#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c -#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 -#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 -#define DVO_ENCODER_CONFIG_24BIT 0x08 - -typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 -{ - USHORT usPixelClock; - UCHAR ucDVOConfig; - UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT - UCHAR ucReseved[4]; -}DVO_ENCODER_CONTROL_PARAMETERS_V3; -#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 
DVO_ENCODER_CONTROL_PARAMETERS_V3 - -typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4 -{ - USHORT usPixelClock; - UCHAR ucDVOConfig; - UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT - UCHAR ucBitPerColor; //please refer to definition of PANEL_xBIT_PER_COLOR - UCHAR ucReseved[3]; -}DVO_ENCODER_CONTROL_PARAMETERS_V1_4; -#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4 - - -//ucTableFormatRevision=1 -//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for -// bit1=0: non-coherent mode -// =1: coherent mode - -//========================================================================================== -//Only change is here next time when changing encoder parameter definitions again! -#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST - -#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 -#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST - -#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 -#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST - -#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS -#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION - -//========================================================================================== -#define PANEL_ENCODER_MISC_DUAL 0x01 -#define PANEL_ENCODER_MISC_COHERENT 0x02 -#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04 -#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08 - -#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE -#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE -#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1) - -#define PANEL_ENCODER_TRUNCATE_EN 0x01 -#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10 -#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01 -#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10 -#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01 -#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10 -#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20 -#define PANEL_ENCODER_25FRC_MASK 0x10 -#define PANEL_ENCODER_25FRC_E 0x00 -#define PANEL_ENCODER_25FRC_F 0x10 -#define PANEL_ENCODER_50FRC_MASK 0x60 -#define PANEL_ENCODER_50FRC_A 0x00 -#define PANEL_ENCODER_50FRC_B 0x20 -#define PANEL_ENCODER_50FRC_C 0x40 -#define PANEL_ENCODER_50FRC_D 0x60 -#define PANEL_ENCODER_75FRC_MASK 0x80 -#define PANEL_ENCODER_75FRC_E 0x00 -#define PANEL_ENCODER_75FRC_F 0x80 - -/****************************************************************************/ -// Structures used by SetVoltageTable -/****************************************************************************/ -#define SET_VOLTAGE_TYPE_ASIC_VDDC 1 -#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2 -#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3 -#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4 -#define SET_VOLTAGE_INIT_MODE 5 -#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic - -#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1 -#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2 -#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 - -#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 -#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 -#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 - -typedef struct _SET_VOLTAGE_PARAMETERS -{ - UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ - UCHAR ucVoltageMode; // To set all, to set source A or source B or ... 
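/* Sketch of composing the dithering fields of
 * LVDS_ENCODER_CONTROL_PARAMETERS_V2 (defined earlier) from the
 * PANEL_ENCODER_* flags above; the helper name and the particular
 * combination (888RGB spatial plus 4-level temporal dithering) are
 * chosen purely for illustration. */
static void lvds_enable_888_dithering(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *p)
{
    p->ucSpatial  = PANEL_ENCODER_SPATIAL_DITHER_EN |
                    PANEL_ENCODER_SPATIAL_DITHER_DEPTH;   /* 888RGB */
    p->ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN |
                    PANEL_ENCODER_TEMPORAL_DITHER_DEPTH |
                    PANEL_ENCODER_TEMPORAL_LEVEL_4;       /* gray level 4 */
}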
- UCHAR ucVoltageIndex; // An index to tell which voltage level - UCHAR ucReserved; -}SET_VOLTAGE_PARAMETERS; - -typedef struct _SET_VOLTAGE_PARAMETERS_V2 -{ - UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ - UCHAR ucVoltageMode; // Not used, maybe use for state machine for differen power mode - USHORT usVoltageLevel; // real voltage level -}SET_VOLTAGE_PARAMETERS_V2; - -// used by both SetVoltageTable v1.3 and v1.4 -typedef struct _SET_VOLTAGE_PARAMETERS_V1_3 -{ - UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI - UCHAR ucVoltageMode; // Indicate action: Set voltage level - USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) -}SET_VOLTAGE_PARAMETERS_V1_3; - -//ucVoltageType -#define VOLTAGE_TYPE_VDDC 1 -#define VOLTAGE_TYPE_MVDDC 2 -#define VOLTAGE_TYPE_MVDDQ 3 -#define VOLTAGE_TYPE_VDDCI 4 -#define VOLTAGE_TYPE_VDDGFX 5 -#define VOLTAGE_TYPE_PCC 6 - -#define VOLTAGE_TYPE_GENERIC_I2C_1 0x11 -#define VOLTAGE_TYPE_GENERIC_I2C_2 0x12 -#define VOLTAGE_TYPE_GENERIC_I2C_3 0x13 -#define VOLTAGE_TYPE_GENERIC_I2C_4 0x14 -#define VOLTAGE_TYPE_GENERIC_I2C_5 0x15 -#define VOLTAGE_TYPE_GENERIC_I2C_6 0x16 -#define VOLTAGE_TYPE_GENERIC_I2C_7 0x17 -#define VOLTAGE_TYPE_GENERIC_I2C_8 0x18 -#define VOLTAGE_TYPE_GENERIC_I2C_9 0x19 -#define VOLTAGE_TYPE_GENERIC_I2C_10 0x1A - -//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode -#define ATOM_SET_VOLTAGE 0 //Set voltage Level -#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator -#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase, only for SVID/PVID regulator -#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used from SetVoltageTable v1.3 -#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID, not used for SetVoltage v1.4 -#define ATOM_GET_LEAKAGE_ID 8 //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4 - -// define vitual voltage id in usVoltageLevel -#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01 -#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02 -#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03 -#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04 -#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05 -#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06 -#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07 -#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08 - -typedef struct _SET_VOLTAGE_PS_ALLOCATION -{ - SET_VOLTAGE_PARAMETERS sASICSetVoltage; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; -}SET_VOLTAGE_PS_ALLOCATION; - -// New Added from SI for GetVoltageInfoTable, input parameter structure -typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 -{ - UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI - UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info - USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. 
) or Leakage Id
-  ULONG  ulReserved;
-}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
-
-// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
-typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
-{
-  ULONG ulVotlageGpioState;
-  ULONG ulVoltageGPioMask;
-}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
-
-// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
-typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
-{
-  USHORT usVoltageLevel;
-  USHORT usVoltageId;                 // Voltage Id programmed in Voltage Regulator
-  ULONG  ulReseved;
-}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
-
-// GetVoltageInfo v1.1 ucVoltageMode
-#define ATOM_GET_VOLTAGE_VID                 0x00
-#define ATOM_GET_VOTLAGE_INIT_SEQ            0x03
-#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID    0x04
-#define ATOM_GET_VOLTAGE_SVID2               0x07   //Get SVI2 Regulator Info
-
-// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
-#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID  0x10
-// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
-#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID  0x11
-
-#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID  0x12
-#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID  0x13
-
-
-// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
-typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
-{
-  UCHAR  ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
-  UCHAR  ucVoltageMode;               // Input: Indicate action: Get voltage info
-  USHORT usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
-  ULONG  ulSCLKFreq;                  // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, defined in the PPTable SCLK/Voltage dependence table
-}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
-
-// New in GetVoltageInfo v1.2 ucVoltageMode
-#define ATOM_GET_VOLTAGE_EVV_VOLTAGE         0x09
-
-// New Added from CI Hawaii for EVV feature
-typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
-{
-  USHORT usVoltageLevel;              // real voltage level in unit of mv
-  USHORT usVoltageId;                 // Voltage Id programmed in Voltage Regulator
-  USHORT usTDP_Current;               // TDP_Current in unit of 0.01A
-  USHORT usTDP_Power;                 // TDP_Power in unit of 0.1W
-}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
-
-/****************************************************************************/
-//  Structures used by TVEncoderControlTable
-/****************************************************************************/
-typedef struct _TV_ENCODER_CONTROL_PARAMETERS
-{
-  USHORT usPixelClock;                // in 10KHz; for bios convenient
-  UCHAR  ucTvStandard;                // See definition "ATOM_TV_NTSC ..."
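/* A hedged sketch (not part of the patch) of an EVV voltage query via
 * GetVoltageInfoTable v1.2 on CI/Hawaii.  The execute step is
 * driver-specific glue and is only indicated in a comment; as with the
 * other command tables in this header, results come back in the same
 * parameter space that carried the inputs. */
static USHORT query_evv_vddc_mv(void)
{
    GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 args = {0};
    GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *evv;

    args.ucVoltageType  = VOLTAGE_TYPE_VDDC;
    args.ucVoltageMode  = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
    args.usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;  /* virtual/leakage id */
    args.ulSCLKFreq     = 80000;                     /* DPM SCLK, 10 kHz units */

    /* atom_execute_table(ctx, GetVoltageInfo, (ULONG *)&args);
     * -- hypothetical driver glue, omitted here. */

    evv = (GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)&args;
    return evv->usVoltageLevel;                      /* in mV */
}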
- UCHAR ucAction; // 0: turn off encoder - // 1: setup and turn on encoder -}TV_ENCODER_CONTROL_PARAMETERS; - -typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION -{ - TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one -}TV_ENCODER_CONTROL_PS_ALLOCATION; - -//==============================Data Table Portion==================================== - - -/****************************************************************************/ -// Structure used in Data.mtb -/****************************************************************************/ -typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES -{ - USHORT UtilityPipeLine; // Offest for the utility to get parser info,Don't change this position! - USHORT MultimediaCapabilityInfo; // Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios - USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios - USHORT StandardVESA_Timing; // Only used by Bios - USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 - USHORT PaletteData; // Only used by BIOS - USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info - USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1 - USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 - USHORT SupportedDevicesInfo; // Will be obsolete from R600 - USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600 - USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600 - USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1 - USHORT VESA_ToInternalModeLUT; // Only used by Bios - USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600 - USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600 - USHORT GPUVirtualizationInfo; // Will be obsolete from R600 - USHORT SaveRestoreInfo; // Only used by Bios - USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info - USHORT OemInfo; // Defined and used by external SW, should be obsolete soon - USHORT XTMDS_Info; // Will be obsolete from R600 - USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used - USHORT Object_Header; // Shared by various SW components,latest version 1.1 - USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!! - USHORT MC_InitParameter; // Only used by command table - USHORT ASIC_VDDC_Info; // Will be obsolete from R600 - USHORT ASIC_InternalSS_Info; // New tabel name from R600, used to be called "ASIC_MVDDC_Info" - USHORT TV_VideoMode; // Only used by command table - USHORT VRAM_Info; // Only used by command table, latest version 1.3 - USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purpose since R600. 
the new table rev start from 2.1 - USHORT IntegratedSystemInfo; // Shared by various SW components - USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 - USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1 - USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 - USHORT ServiceInfo; -}ATOM_MASTER_LIST_OF_DATA_TABLES; - -typedef struct _ATOM_MASTER_DATA_TABLE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; -}ATOM_MASTER_DATA_TABLE; - -// For backward compatible -#define LVDS_Info LCD_Info -#define DAC_Info PaletteData -#define TMDS_Info DIGTransmitterInfo -#define CompassionateData GPUVirtualizationInfo - -/****************************************************************************/ -// Structure used in MultimediaCapabilityInfoTable -/****************************************************************************/ -typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulSignature; // HW info table signature string "$ATI" - UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) - UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) - UCHAR ucVideoPortInfo; // Provides the video port capabilities - UCHAR ucHostPortInfo; // Provides host port configuration information -}ATOM_MULTIMEDIA_CAPABILITY_INFO; - - -/****************************************************************************/ -// Structure used in MultimediaConfigInfoTable -/****************************************************************************/ -typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulSignature; // MM info table signature sting "$MMT" - UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) - UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5) - UCHAR ucProductID; // Defines as OEM ID or ATI board ID dependent on product type setting - UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) - UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) - UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4) - UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3) - UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) - UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) - UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) - UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) - UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) -}ATOM_MULTIMEDIA_CONFIG_INFO; - - -/****************************************************************************/ -// Structures used in FirmwareInfoTable -/****************************************************************************/ - -// usBIOSCapability Defintion: -// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; -// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; -// Bit 2 = 0: Extended 
-#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
-#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
-#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
-#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008 // (valid from v1.1 ~v1.4): =1: memclk SS enable, =0 memclk SS disable.
-#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010 // (valid from v1.1 ~v1.4): =1: engclk SS enable, =0 engclk SS disable.
-#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
-#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
-#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
-#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
-#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
-#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
-#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
-#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
-#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
-
-
-#ifndef _H2INC
-
-//Please don't add or expand this bitfield structure below; this one will retire soon!
-typedef struct _ATOM_FIRMWARE_CAPABILITY
-{
-#if ATOM_BIG_ENDIAN
-  USHORT Reserved:1;
-  USHORT SCL2Redefined:1;
-  USHORT PostWithoutModeSet:1;
-  USHORT HyperMemory_Size:4;
-  USHORT HyperMemory_Support:1;
-  USHORT PPMode_Assigned:1;
-  USHORT WMI_SUPPORT:1;
-  USHORT GPUControlsBL:1;
-  USHORT EngineClockSS_Support:1;
-  USHORT MemoryClockSS_Support:1;
-  USHORT ExtendedDesktopSupport:1;
-  USHORT DualCRTC_Support:1;
-  USHORT FirmwarePosted:1;
-#else
-  USHORT FirmwarePosted:1;
-  USHORT DualCRTC_Support:1;
-  USHORT ExtendedDesktopSupport:1;
-  USHORT MemoryClockSS_Support:1;
-  USHORT EngineClockSS_Support:1;
-  USHORT GPUControlsBL:1;
-  USHORT WMI_SUPPORT:1;
-  USHORT PPMode_Assigned:1;
-  USHORT HyperMemory_Support:1;
-  USHORT HyperMemory_Size:4;
-  USHORT PostWithoutModeSet:1;
-  USHORT SCL2Redefined:1;
-  USHORT Reserved:1;
-#endif
-}ATOM_FIRMWARE_CAPABILITY;
-
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
-{
-  ATOM_FIRMWARE_CAPABILITY sbfAccess;
-  USHORT susAccess;
-}ATOM_FIRMWARE_CAPABILITY_ACCESS;
-
-#else
-
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
-{
-  USHORT susAccess;
-}ATOM_FIRMWARE_CAPABILITY_ACCESS;
-
-#endif
-
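The union above lets callers read the capability word either through the bitfield or as a raw USHORT tested against the ATOM_BIOS_INFO_* masks; a minimal sketch (function name illustrative):

/* Both views are equivalent; the raw-mask form also works when _H2INC is
 * defined and the bitfield view is unavailable. */
static int bios_is_posted(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
   return (cap.susAccess & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) != 0;
}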
-typedef struct _ATOM_FIRMWARE_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;       //In 10Khz unit
-  ULONG ulDefaultMemoryClock;       //In 10Khz unit
-  ULONG ulDriverTargetEngineClock;  //In 10Khz unit
-  ULONG ulDriverTargetMemoryClock;  //In 10Khz unit
-  ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
-  ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
-  ULONG ulMaxPixelClockPLL_Output;  //In 10Khz unit
-  ULONG ulASICMaxEngineClock;       //In 10Khz unit
-  ULONG ulASICMaxMemoryClock;       //In 10Khz unit
-  UCHAR ucASICMaxTemperature;
-  UCHAR ucPadding[3];               //Don't use them
-  ULONG aulReservedForBIOS[3];      //Don't use them
-  USHORT usMinEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Output; //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
-  USHORT usMaxPixelClock;            //In 10Khz unit, Max. Pclk
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usReferenceClock;           //In 10Khz unit
-  USHORT usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
-  UCHAR ucPM_RTS_StreamSize;         //RTS PM4 packets in Kb unit
-  UCHAR ucDesign_ID;                 //Indicate what is the board design
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-}ATOM_FIRMWARE_INFO;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_2
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;       //In 10Khz unit
-  ULONG ulDefaultMemoryClock;       //In 10Khz unit
-  ULONG ulDriverTargetEngineClock;  //In 10Khz unit
-  ULONG ulDriverTargetMemoryClock;  //In 10Khz unit
-  ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
-  ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
-  ULONG ulMaxPixelClockPLL_Output;  //In 10Khz unit
-  ULONG ulASICMaxEngineClock;       //In 10Khz unit
-  ULONG ulASICMaxMemoryClock;       //In 10Khz unit
-  UCHAR ucASICMaxTemperature;
-  UCHAR ucMinAllowedBL_Level;
-  UCHAR ucPadding[2];               //Don't use them
-  ULONG aulReservedForBIOS[2];      //Don't use them
-  ULONG ulMinPixelClockPLL_Output;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Output; //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
-  USHORT usMaxPixelClock;            //In 10Khz unit, Max. Pclk
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usReferenceClock;           //In 10Khz unit
-  USHORT usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
-  UCHAR ucPM_RTS_StreamSize;         //RTS PM4 packets in Kb unit
-  UCHAR ucDesign_ID;                 //Indicate what is the board design
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-}ATOM_FIRMWARE_INFO_V1_2;
-
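All the clock fields above are stored in 10 kHz units, so a stored value of 40000 means 400 MHz; a worked conversion (helper names illustrative):

/* 10 kHz units -> kHz / MHz. E.g. ulDefaultEngineClock = 40000
 * => 40000 * 10 = 400000 kHz => 40000 / 100 = 400 MHz. */
static ULONG clk_10khz_to_khz(ULONG v) { return v * 10; }
static ULONG clk_10khz_to_mhz(ULONG v) { return v / 100; }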
-typedef struct _ATOM_FIRMWARE_INFO_V1_3
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;        //In 10Khz unit
-  ULONG ulDefaultMemoryClock;        //In 10Khz unit
-  ULONG ulDriverTargetEngineClock;   //In 10Khz unit
-  ULONG ulDriverTargetMemoryClock;   //In 10Khz unit
-  ULONG ulMaxEngineClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxMemoryClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxPixelClockPLL_Output;   //In 10Khz unit
-  ULONG ulASICMaxEngineClock;        //In 10Khz unit
-  ULONG ulASICMaxMemoryClock;        //In 10Khz unit
-  UCHAR ucASICMaxTemperature;
-  UCHAR ucMinAllowedBL_Level;
-  UCHAR ucPadding[2];                //Don't use them
-  ULONG aulReservedForBIOS;          //Don't use them
-  ULONG ul3DAccelerationEngineClock; //In 10Khz unit
-  ULONG ulMinPixelClockPLL_Output;   //In 10Khz unit
-  USHORT usMinEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Output; //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
-  USHORT usMaxPixelClock;            //In 10Khz unit, Max. Pclk
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usReferenceClock;           //In 10Khz unit
-  USHORT usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
-  UCHAR ucPM_RTS_StreamSize;         //RTS PM4 packets in Kb unit
-  UCHAR ucDesign_ID;                 //Indicate what is the board design
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-}ATOM_FIRMWARE_INFO_V1_3;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_4
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;        //In 10Khz unit
-  ULONG ulDefaultMemoryClock;        //In 10Khz unit
-  ULONG ulDriverTargetEngineClock;   //In 10Khz unit
-  ULONG ulDriverTargetMemoryClock;   //In 10Khz unit
-  ULONG ulMaxEngineClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxMemoryClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxPixelClockPLL_Output;   //In 10Khz unit
-  ULONG ulASICMaxEngineClock;        //In 10Khz unit
-  ULONG ulASICMaxMemoryClock;        //In 10Khz unit
-  UCHAR ucASICMaxTemperature;
-  UCHAR ucMinAllowedBL_Level;
-  USHORT usBootUpVDDCVoltage;        //In MV unit
-  USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
-  USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
-  ULONG ul3DAccelerationEngineClock; //In 10Khz unit
-  ULONG ulMinPixelClockPLL_Output;   //In 10Khz unit
-  USHORT usMinEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Output; //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
-  USHORT usMaxPixelClock;            //In 10Khz unit, Max. Pclk
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usReferenceClock;           //In 10Khz unit
-  USHORT usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
-  UCHAR ucPM_RTS_StreamSize;         //RTS PM4 packets in Kb unit
-  UCHAR ucDesign_ID;                 //Indicate what is the board design
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-}ATOM_FIRMWARE_INFO_V1_4;
-
-//the structure below to be used from Cypress
-typedef struct _ATOM_FIRMWARE_INFO_V2_1
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;        //In 10Khz unit
-  ULONG ulDefaultMemoryClock;        //In 10Khz unit
-  ULONG ulReserved1;
-  ULONG ulReserved2;
-  ULONG ulMaxEngineClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxMemoryClockPLL_Output;  //In 10Khz unit
-  ULONG ulMaxPixelClockPLL_Output;   //In 10Khz unit
-  ULONG ulBinaryAlteredInfo;         //Was ulASICMaxEngineClock
-  ULONG ulDefaultDispEngineClkFreq;  //In 10Khz unit
-  UCHAR ucReserved1;                 //Was ucASICMaxTemperature;
-  UCHAR ucMinAllowedBL_Level;
-  USHORT usBootUpVDDCVoltage;        //In MV unit
-  USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
-  USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
-  ULONG ulReserved4;                 //Was ulAsicMaximumVoltage
-  ULONG ulMinPixelClockPLL_Output;   //In 10Khz unit
-  USHORT usMinEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxEngineClockPLL_Input;  //In 10Khz unit
-  USHORT usMinEngineClockPLL_Output; //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMaxMemoryClockPLL_Input;  //In 10Khz unit
-  USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
-  USHORT usMaxPixelClock;            //In 10Khz unit, Max. Pclk
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usCoreReferenceClock;       //In 10Khz unit
-  USHORT usMemoryReferenceClock;     //In 10Khz unit
-  USHORT usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-  UCHAR ucReserved4[3];
-
-}ATOM_FIRMWARE_INFO_V2_1;
-
-//the structure below to be used from NI
-//ucTableFormatRevision=2
-//ucTableContentRevision=2
-
-typedef struct _PRODUCT_BRANDING
-{
-  UCHAR ucEMBEDDED_CAP:2;  // Bit[1:0] Embedded feature level
-  UCHAR ucReserved:2;      // Bit[3:2] Reserved
-  UCHAR ucBRANDING_ID:4;   // Bit[7:4] Branding ID
-}PRODUCT_BRANDING;
-
-typedef struct _ATOM_FIRMWARE_INFO_V2_2
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulFirmwareRevision;
-  ULONG ulDefaultEngineClock;        //In 10Khz unit
-  ULONG ulDefaultMemoryClock;        //In 10Khz unit
-  ULONG ulSPLL_OutputFreq;           //In 10Khz unit
-  ULONG ulGPUPLL_OutputFreq;         //In 10Khz unit
-  ULONG ulReserved1;                 //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
-  ULONG ulReserved2;                 //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
-  ULONG ulMaxPixelClockPLL_Output;   //In 10Khz unit
-  ULONG ulBinaryAlteredInfo;         //Was ulASICMaxEngineClock ?
-  ULONG ulDefaultDispEngineClkFreq;  //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
-  UCHAR ucReserved3;                 //Was ucASICMaxTemperature;
-  UCHAR ucMinAllowedBL_Level;
-  USHORT usBootUpVDDCVoltage;        //In MV unit
-  USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
-  USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
-  ULONG ulReserved4;                 //Was ulAsicMaximumVoltage
-  ULONG ulMinPixelClockPLL_Output;   //In 10Khz unit
-  UCHAR ucRemoteDisplayConfig;
-  UCHAR ucReserved5[3];              //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
-  ULONG ulReserved6;                 //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
-  ULONG ulReserved7;                 //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
-  USHORT usReserved11;               //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
-  USHORT usMinPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usMaxPixelClockPLL_Input;   //In 10Khz unit
-  USHORT usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
-  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
-  USHORT usCoreReferenceClock;       //In 10Khz unit
-  USHORT usMemoryReferenceClock;     //In 10Khz unit
-  USHORT usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
-  UCHAR ucMemoryModule_ID;           //Indicate what is the board design
-  UCHAR ucCoolingSolution_ID;        //0: Air cooling; 1: Liquid cooling ... [COOLING_SOLUTION]
-  PRODUCT_BRANDING ucProductBranding; // Bit[7:4]ucBRANDING_ID: Branding ID, Bit[3:2]ucReserved: Reserved, Bit[1:0]ucEMBEDDED_CAP: Embedded feature level.
-  UCHAR ucReserved9;
-  USHORT usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
-  USHORT usBootUpVDDGFXVoltage;      //In unit of mv;
-  ULONG ulReserved10[3];             // Newly added compared to previous version
-}ATOM_FIRMWARE_INFO_V2_2;
-
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
-
-
-// definition of ucRemoteDisplayConfig
-#define REMOTE_DISPLAY_DISABLE 0x00
-#define REMOTE_DISPLAY_ENABLE  0x01
-
-/****************************************************************************/
-// Structures used in IntegratedSystemInfoTable
-/****************************************************************************/
-#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN   0x2
-#define IGP_CAP_FLAG_AC_CARD            0x4
-#define IGP_CAP_FLAG_SDVO_CARD          0x8
-#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE  0x10
-
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;    //in 10kHz unit
-  ULONG ulBootUpMemoryClock;    //in 10kHz unit
-  ULONG ulMaxSystemMemoryClock; //in 10kHz unit
-  ULONG ulMinSystemMemoryClock; //in 10kHz unit
-  UCHAR ucNumberOfCyclesInPeriodHi;
-  UCHAR ucLCDTimingSel;         //=0:not valid. !=0 sel this timing descriptor from LCD EDID.
-  USHORT usReserved1;
-  USHORT usInterNBVoltageLow;   //An intermediate PWM value to set the voltage
-  USHORT usInterNBVoltageHigh;  //Another intermediate PWM value to set the voltage
-  ULONG ulReserved[2];
-
-  USHORT usFSBClock;            //In MHz unit
-  USHORT usCapabilityFlag;      //Bit0=1 indicates the fake HDMI support, Bit1=0/1 for Dynamic clocking dis/enable
-                                //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
-                                //Bit[4]==1: P/2 mode, ==0: P/1 mode
-  USHORT usPCIENBCfgReg7;       //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
-  USHORT usK8MemoryClock;       //in MHz unit
-  USHORT usK8SyncStartDelay;    //in 0.01 us unit
-  USHORT usK8DataReturnTime;    //in 0.01 us unit
-  UCHAR ucMaxNBVoltage;
-  UCHAR ucMinNBVoltage;
-  UCHAR ucMemoryType;           //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
-  UCHAR ucNumberOfCyclesInPeriod;  //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
-  UCHAR ucStartingPWM_HighTime;    //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
-  UCHAR ucHTLinkWidth;          //16 bit vs. 8 bit
-  UCHAR ucMaxNBVoltageHigh;
-  UCHAR ucMinNBVoltageHigh;
-}ATOM_INTEGRATED_SYSTEM_INFO;
-
-/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
-ulBootUpMemoryClock:    For Intel IGP, it's the UMA system memory clock
-                        For AMD IGP, it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
-ulMaxSystemMemoryClock: For Intel IGP, it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
-                        For AMD IGP, for now this can be 0
-ulMinSystemMemoryClock: For Intel IGP, it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
-                        For AMD IGP, for now this can be 0
-
-usFSBClock:             For Intel IGP, it's FSB Freq
-                        For AMD IGP, it's HT Link Speed
-
-usK8MemoryClock:    For AMD IGP only. For RevF CPU, set it to 200
-usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation
-usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation
-
-VC: Voltage Control
-ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
-ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all.
-
-ucNumberOfCyclesInPeriod:   Indicates how many cycles when PWM duty is 100%. Low 8 bits of the value.
-ucNumberOfCyclesInPeriodHi: Indicates how many cycles when PWM duty is 100%. High 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it 0
-
-ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
-ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all.
-
-
-usInterNBVoltageLow:  Voltage regulator dependent PWM value. The value makes the voltage >= Min NB voltage but <= InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
-usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >= InterNBVoltageLow but <= Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
-*/
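A small sketch of the split-byte encoding described above: the 16-bit PWM value for the max NB voltage is ucMaxNBVoltageHigh:ucMaxNBVoltage, and bit 7 of ucNumberOfCyclesInPeriodHi flags an inverter on the PWM output (helper names illustrative):

static USHORT igp_max_nb_voltage(const ATOM_INTEGRATED_SYSTEM_INFO *info)
{
   /* high byte from ucMaxNBVoltageHigh, low byte from ucMaxNBVoltage */
   return ((USHORT)info->ucMaxNBVoltageHigh << 8) | info->ucMaxNBVoltage;
}
/* bit [7] of ucNumberOfCyclesInPeriodHi == 1 means the PWM has an inverter */
#define IGP_PWM_HAS_INVERTER(info) (((info)->ucNumberOfCyclesInPeriodHi & 0x80) != 0)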
-
-
-/*
-The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
-Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
-The reservation is large enough that table revisions should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
-
-SW components can access the IGP system info structure in the same way as before
-*/
-
-
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;   //in 10kHz unit
-  ULONG ulReserved1[2];        //must be 0x0 for the reserved
-  ULONG ulBootUpUMAClock;      //in 10kHz unit
-  ULONG ulBootUpSidePortClock; //in 10kHz unit
-  ULONG ulMinSidePortClock;    //in 10kHz unit
-  ULONG ulReserved2[6];        //must be 0x0 for the reserved
-  ULONG ulSystemConfig;        //see explanation below
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulOtherDisplayMisc;
-  ULONG ulDDISlot1Config;
-  ULONG ulDDISlot2Config;
-  UCHAR ucMemoryType;          //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
-  UCHAR ucUMAChannelNumber;
-  UCHAR ucDockingPinBit;
-  UCHAR ucDockingPinPolarity;
-  ULONG ulDockingPinCFGInfo;
-  ULONG ulCPUCapInfo;
-  USHORT usNumberOfCyclesInPeriod;
-  USHORT usMaxNBVoltage;
-  USHORT usMinNBVoltage;
-  USHORT usBootUpNBVoltage;
-  ULONG ulHTLinkFreq;          //in 10Khz
-  USHORT usMinHTLinkWidth;
-  USHORT usMaxHTLinkWidth;
-  USHORT usUMASyncStartDelay;
-  USHORT usUMADataReturnTime;
-  USHORT usLinkStatusZeroTime;
-  USHORT usDACEfuse;           //for storing bandgap value (for RS880 only)
-  ULONG ulHighVoltageHTLinkFreq; // in 10Khz
-  ULONG ulLowVoltageHTLinkFreq;  // in 10Khz
-  USHORT usMaxUpStreamHTLinkWidth;
-  USHORT usMaxDownStreamHTLinkWidth;
-  USHORT usMinUpStreamHTLinkWidth;
-  USHORT usMinDownStreamHTLinkWidth;
-  USHORT usFirmwareVersion;    //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
-  USHORT usFullT0Time;         // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
-  ULONG ulReserved3[96];       //must be 0x0
-}ATOM_INTEGRATED_SYSTEM_INFO_V2;
-
-/*
-ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
-ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
-ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present; this could be equal to or less than the maximum supported Sideport memory clock
-
-ulSystemConfig:
-Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
-Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state
-      =0: system boots up at driver control state. Power state depends on PowerPlay table.
-Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
-Bit[3]=1: Only one power state (Performance) will be supported.
-      =0: Multiple power states supported from PowerPlay table.
-Bit[4]=1: CLMC is supported and enabled on current system.
-      =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface.
-Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
-      =0: CDLW is disabled. In the CLMC-enabled case, Min HT width will be set equal to Max HT width. In the CLMC-disabled case, Max HT width will be applied.
-Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
-      =0: Voltage settings are determined by the powerplay table.
-Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
-      =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
-Bit[8]=1: CDLF is supported and enabled on current system.
-      =0: CDLF is not supported or enabled on current system.
-Bit[9]=1: DLL Shut Down feature is enabled on current system.
-      =0: DLL Shut Down feature is not enabled or supported on current system.
-
-ulBootUpReqDisplayVector: This dword is a bit vector that indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
-
-ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
-                    [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
-
-ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
-      [3:0]  - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
-      [7:4]  - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
-      When a DDI connector is not "paired" (meaning the two connections are mutually exclusive between chassis and docking, i.e. only one of them can be connected at one time)
-      in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
-      if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
-
-      [15:8] - Lane configuration attribute;
-      [23:16]- Connector type, possible value:
-               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
-               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
-               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
-               CONNECTOR_OBJECT_ID_DISPLAYPORT
-               CONNECTOR_OBJECT_ID_eDP
-      [31:24]- Reserved
-
-ulDDISlot2Config: Same as Slot1.
-ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
-For IGP, Hypermemory is the only memory type shown in CCC.
-
-ucUMAChannelNumber: how many channels for the UMA;
-
-ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
-ucDockingPinBit:     which bit in this register to read the pin status;
-ucDockingPinPolarity: Polarity of the pin when docked;
-
-ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
-
-usNumberOfCyclesInPeriod: Indicates how many cycles when PWM duty is 100%.
-
-usMaxNBVoltage: Max. voltage control value in either PWM or GPIO mode.
-usMinNBVoltage: Min. voltage control value in either PWM or GPIO mode.
-                GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
-                PWM mode:  both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
-                GPU SW doesn't-control mode: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE doesn't matter
-
-usBootUpNBVoltage: Boot-up voltage regulator dependent PWM value.
-
-
-ulHTLinkFreq:     Bootup HT link Frequency in 10Khz.
-usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
-                  If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
-                  If CDLW enabled, both upstream and downstream width should be the same during bootup.
-
-usUMASyncStartDelay:  Memory access latency, required for watermark calculation
-usUMADataReturnTime:  Memory access latency, required for watermark calculation
-usLinkStatusZeroTime: Memory access latency required for watermark calculation; set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
-for Griffin or Greyhound. SBIOS needs to convert to actual time by:
-                      if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
-                      if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
-                      if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
-                      if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
-
-ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0.
-                         This must be less than or equal to ulHTLinkFreq (bootup frequency).
-ulLowVoltageHTLinkFreq:  HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
-                         This must be less than or equal to ulHighVoltageHTLinkFreq.
-
-usMaxUpStreamHTLinkWidth:   Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
-usMaxDownStreamHTLinkWidth: same as above.
-usMinUpStreamHTLinkWidth:   Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
-usMinDownStreamHTLinkWidth: same as above.
-*/
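The T0Ttime encoding above packs a 2-bit scale in [5:4] and a 4-bit mantissa in [3:0]; a worked decode into 0.01 us units (function name illustrative):

/* Returns usLinkStatusZeroTime in 0.01 us units, per the table above:
 * scale 00b -> 0.1 us steps (10), 01b -> 0.5 us (50), 10b -> 2.0 us (200),
 * 11b -> 20 us (2000, mantissa limited to 0x0..0xa per the spec text). */
static USHORT t0t_to_link_status_zero_time(UCHAR t0t)
{
   static const USHORT step[4] = { 10, 50, 200, 2000 };
   return (USHORT)((t0t & 0x0F) * step[(t0t >> 4) & 0x03]);
}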
-
-// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
-#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU        0
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN   1
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8        3
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH   4
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    5
-
-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE  INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code
-
-#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE          0x00000001
-#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE      0x00000002
-#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE           0x00000004
-#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY  0x00000008
-#define SYSTEM_CONFIG_CLMC_ENABLED                 0x00000010
-#define SYSTEM_CONFIG_CDLW_ENABLED                 0x00000020
-#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED       0x00000040
-#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED     0x00000080
-#define SYSTEM_CONFIG_CDLF_ENABLED                 0x00000100
-#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED         0x00000200
-
-#define IGP_DDI_SLOT_LANE_CONFIG_MASK              0x000000FF
-
-#define b0IGP_DDI_SLOT_LANE_MAP_MASK               0x0F
-#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK       0xF0
-#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3             0x01
-#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7             0x02
-#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11            0x04
-#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15           0x08
-
-#define IGP_DDI_SLOT_ATTRIBUTE_MASK                0x0000FF00
-#define IGP_DDI_SLOT_CONFIG_REVERSED               0x00000100
-#define b1IGP_DDI_SLOT_CONFIG_REVERSED             0x01
-
-#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK           0x00FF0000
-
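A small sketch using the masks above: testing the NB voltage-control method in ulSystemConfig and pulling the chassis lane map out of a ulDDISlotXConfig word (function names illustrative):

static int igp_uses_pwm_voltage(ULONG system_config)
{
   return (system_config & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
}
static UCHAR ddi_slot_chassis_lanes(ULONG slot_config)
{
   /* [3:0] = chassis lane map, [7:4] = docking lane map (see above) */
   return (UCHAR)(slot_config & b0IGP_DDI_SLOT_LANE_MAP_MASK);
}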
-// IntegratedSystemInfoTable new Rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;  //in 10kHz unit
-  ULONG ulDentistVCOFreq;     //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
-  ULONG ulLClockFreq;         //GPU Lclk freq in 10kHz unit, has a relationship with NCLK in NorthBridge
-  ULONG ulBootUpUMAClock;     //in 10kHz unit
-  ULONG ulReserved1[8];       //must be 0x0 for the reserved
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulOtherDisplayMisc;
-  ULONG ulReserved2[4];       //must be 0x0 for the reserved
-  ULONG ulSystemConfig;       //TBD
-  ULONG ulCPUCapInfo;         //TBD
-  USHORT usMaxNBVoltage;      //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
-  USHORT usMinNBVoltage;      //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
-  USHORT usBootUpNBVoltage;   //boot up NB voltage
-  UCHAR ucHtcTmpLmt;          //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
-  UCHAR ucTjOffset;           //bit [28:22] of D24F3xE4 Thermtrip Status Register, may not be needed, TBD
-  ULONG ulReserved3[4];       //must be 0x0 for the reserved
-  ULONG ulDDISlot1Config;     //see above ulDDISlot1Config definition
-  ULONG ulDDISlot2Config;
-  ULONG ulDDISlot3Config;
-  ULONG ulDDISlot4Config;
-  ULONG ulReserved4[4];       //must be 0x0 for the reserved
-  UCHAR ucMemoryType;         //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
-  UCHAR ucUMAChannelNumber;
-  USHORT usReserved;
-  ULONG ulReserved5[4];       //must be 0x0 for the reserved
-  ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; //arrays with values for CSR M3 arbiter for default
-  ULONG ulCSR_M3_ARB_CNTL_UVD[10];     //arrays with values for CSR M3 arbiter for UVD playback
-  ULONG ulCSR_M3_ARB_CNTL_FS3D[10];    //arrays with values for CSR M3 arbiter for Full Screen 3D applications
-  ULONG ulReserved6[61];      //must be 0x0
-}ATOM_INTEGRATED_SYSTEM_INFO_V5;
-
-
-
-/****************************************************************************/
-// Structure used in GPUVirtualizationInfoTable
-/****************************************************************************/
-typedef struct _ATOM_GPU_VIRTUALIZATION_INFO_V2_1
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulMCUcodeRomStartAddr;
-  ULONG ulMCUcodeLength;
-  ULONG ulSMCUcodeRomStartAddr;
-  ULONG ulSMCUcodeLength;
-  ULONG ulRLCVUcodeRomStartAddr;
-  ULONG ulRLCVUcodeLength;
-  ULONG ulTOCUcodeStartAddr;
-  ULONG ulTOCUcodeLength;
-  ULONG ulSMCPatchTableStartAddr;
-  ULONG ulSmcPatchTableLength;
-  ULONG ulSystemFlag;
-}ATOM_GPU_VIRTUALIZATION_INFO_V2_1;
-
-
-#define ATOM_CRT_INT_ENCODER1_INDEX  0x00000000
-#define ATOM_LCD_INT_ENCODER1_INDEX  0x00000001
-#define ATOM_TV_INT_ENCODER1_INDEX   0x00000002
-#define ATOM_DFP_INT_ENCODER1_INDEX  0x00000003
-#define ATOM_CRT_INT_ENCODER2_INDEX  0x00000004
-#define ATOM_LCD_EXT_ENCODER1_INDEX  0x00000005
-#define ATOM_TV_EXT_ENCODER1_INDEX   0x00000006
-#define ATOM_DFP_EXT_ENCODER1_INDEX  0x00000007
-#define ATOM_CV_INT_ENCODER1_INDEX   0x00000008
-#define ATOM_DFP_INT_ENCODER2_INDEX  0x00000009
-#define ATOM_CRT_EXT_ENCODER1_INDEX  0x0000000A
-#define ATOM_CV_EXT_ENCODER1_INDEX   0x0000000B
-#define ATOM_DFP_INT_ENCODER3_INDEX  0x0000000C
-#define ATOM_DFP_INT_ENCODER4_INDEX  0x0000000D
-
-// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
-#define ASIC_INT_DAC1_ENCODER_ID  0x00
-#define ASIC_INT_TV_ENCODER_ID    0x02
-#define ASIC_INT_DIG1_ENCODER_ID  0x03
-#define ASIC_INT_DAC2_ENCODER_ID  0x04
-#define ASIC_EXT_TV_ENCODER_ID    0x06
-#define ASIC_INT_DVO_ENCODER_ID   0x07
-#define ASIC_INT_DIG2_ENCODER_ID  0x09
-#define ASIC_EXT_DIG_ENCODER_ID   0x05
-#define ASIC_EXT_DIG2_ENCODER_ID  0x08
-#define ASIC_INT_DIG3_ENCODER_ID  0x0a
-#define ASIC_INT_DIG4_ENCODER_ID  0x0b
-#define ASIC_INT_DIG5_ENCODER_ID  0x0c
-#define ASIC_INT_DIG6_ENCODER_ID  0x0d
-#define ASIC_INT_DIG7_ENCODER_ID  0x0e
-
-//define Encoder attribute
-#define ATOM_ANALOG_ENCODER  0
-#define ATOM_DIGITAL_ENCODER 1
-#define ATOM_DP_ENCODER      2
-
-#define ATOM_ENCODER_ENUM_MASK 0x70
-#define ATOM_ENCODER_ENUM_ID1  0x00
-#define ATOM_ENCODER_ENUM_ID2  0x10
-#define ATOM_ENCODER_ENUM_ID3  0x20
-#define ATOM_ENCODER_ENUM_ID4  0x30
-#define ATOM_ENCODER_ENUM_ID5  0x40
-#define ATOM_ENCODER_ENUM_ID6  0x50
-
-#define ATOM_DEVICE_CRT1_INDEX 0x00000000
-#define ATOM_DEVICE_LCD1_INDEX 0x00000001
-#define ATOM_DEVICE_TV1_INDEX  0x00000002
-#define ATOM_DEVICE_DFP1_INDEX 0x00000003
-#define ATOM_DEVICE_CRT2_INDEX 0x00000004
-#define ATOM_DEVICE_LCD2_INDEX 0x00000005
-#define ATOM_DEVICE_DFP6_INDEX 0x00000006
-#define ATOM_DEVICE_DFP2_INDEX 0x00000007
-#define ATOM_DEVICE_CV_INDEX   0x00000008
-#define ATOM_DEVICE_DFP3_INDEX 0x00000009
-#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
-#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
-
-#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
-#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
-#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
-#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO   (ATOM_DEVICE_DFP3_INDEX+1)
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
-
-#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
-
-#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
-#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
-#define ATOM_DEVICE_TV1_SUPPORT  (0x1L << ATOM_DEVICE_TV1_INDEX )
-#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
-#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
-#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
-#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
-#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
-#define ATOM_DEVICE_CV_SUPPORT   (0x1L << ATOM_DEVICE_CV_INDEX )
-#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
-#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
-#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
-
-
-#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
-#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
-#define ATOM_DEVICE_TV_SUPPORT  ATOM_DEVICE_TV1_SUPPORT
-#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
-
-#define ATOM_DEVICE_CONNECTOR_TYPE_MASK   0x000000F0
-#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT  0x00000004
-#define ATOM_DEVICE_CONNECTOR_VGA         0x00000001
-#define ATOM_DEVICE_CONNECTOR_DVI_I       0x00000002
-#define ATOM_DEVICE_CONNECTOR_DVI_D       0x00000003
-#define ATOM_DEVICE_CONNECTOR_DVI_A       0x00000004
-#define ATOM_DEVICE_CONNECTOR_SVIDEO      0x00000005
-#define ATOM_DEVICE_CONNECTOR_COMPOSITE   0x00000006
-#define ATOM_DEVICE_CONNECTOR_LVDS        0x00000007
-#define ATOM_DEVICE_CONNECTOR_DIGI_LINK   0x00000008
-#define ATOM_DEVICE_CONNECTOR_SCART       0x00000009
-#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A
-#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B
-#define ATOM_DEVICE_CONNECTOR_CASE_1      0x0000000E
-#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
-
-
-#define ATOM_DEVICE_DAC_INFO_MASK  0x0000000F
-#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
-#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
-#define ATOM_DEVICE_DAC_INFO_DACA  0x00000001
-#define ATOM_DEVICE_DAC_INFO_DACB  0x00000002
-#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003
-
-#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000
-
-#define ATOM_DEVICE_I2C_LINEMUX_MASK  0x0000000F
-#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000
-
-#define ATOM_DEVICE_I2C_ID_MASK              0x00000070
-#define ATOM_DEVICE_I2C_ID_SHIFT             0x00000004
-#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
-#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE     0x00000002
-#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE   0x00000003 //For IGP RS600
-#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL    0x00000004 //For IGP RS690
-
-#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK      0x00000080
-#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT     0x00000007
-#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
-#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
-
-// usDeviceSupport:
-// Bit 0 = 0 - no CRT1 support, = 1 - CRT1 is supported
-// Bit 1 = 0 - no LCD1 support, = 1 - LCD1 is supported
-// Bit 2 = 0 - no TV1 support,  = 1 - TV1 is supported
-// Bit 3 = 0 - no DFP1 support, = 1 - DFP1 is supported
-// Bit 4 = 0 - no CRT2 support, = 1 - CRT2 is supported
-// Bit 5 = 0 - no LCD2 support, = 1 - LCD2 is supported
-// Bit 6 = 0 - no DFP6 support, = 1 - DFP6 is supported
-// Bit 7 = 0 - no DFP2 support, = 1 - DFP2 is supported
-// Bit 8 = 0 - no CV support,   = 1 - CV is supported
-// Bit 9 = 0 - no DFP3 support, = 1 - DFP3 is supported
-// Bit 10= 0 - no DFP4 support, = 1 - DFP4 is supported
-// Bit 11= 0 - no DFP5 support, = 1 - DFP5 is supported
-//
-//
-
-/****************************************************************************/
-// Structure used in MclkSS_InfoTable
-/****************************************************************************/
-// ucI2C_ConfigID
-// [7:0] - I2C LINE Associate ID
-//       = 0 - no I2C
-// [7]   - HW_Cap       = 1, [6:0]=HW assisted I2C ID (HW line selection)
-//                      = 0, [6:0]=SW assisted I2C ID
-// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
-//                      = 2, HW engine for Multimedia use
-//                      = 3-7 Reserved for future I2C engines
-// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
-
-typedef struct _ATOM_I2C_ID_CONFIG
-{
-#if ATOM_BIG_ENDIAN
-  UCHAR bfHW_Capable:1;
-  UCHAR bfHW_EngineID:3;
-  UCHAR bfI2C_LineMux:4;
-#else
-  UCHAR bfI2C_LineMux:4;
-  UCHAR bfHW_EngineID:3;
-  UCHAR bfHW_Capable:1;
-#endif
-}ATOM_I2C_ID_CONFIG;
-
-typedef union _ATOM_I2C_ID_CONFIG_ACCESS
-{
-  ATOM_I2C_ID_CONFIG sbfAccess;
-  UCHAR ucAccess;
-}ATOM_I2C_ID_CONFIG_ACCESS;
-
-
-/****************************************************************************/
-// Structure used in GPIO_I2C_InfoTable
-/****************************************************************************/
-typedef struct _ATOM_GPIO_I2C_ASSIGMENT
-{
-  USHORT usClkMaskRegisterIndex;
-  USHORT usClkEnRegisterIndex;
-  USHORT usClkY_RegisterIndex;
-  USHORT usClkA_RegisterIndex;
-  USHORT usDataMaskRegisterIndex;
-  USHORT usDataEnRegisterIndex;
-  USHORT usDataY_RegisterIndex;
-  USHORT usDataA_RegisterIndex;
-  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-  UCHAR ucClkMaskShift;
-  UCHAR ucClkEnShift;
-  UCHAR ucClkY_Shift;
-  UCHAR ucClkA_Shift;
-  UCHAR ucDataMaskShift;
-  UCHAR ucDataEnShift;
-  UCHAR ucDataY_Shift;
-  UCHAR ucDataA_Shift;
-  UCHAR ucReserved1;
-  UCHAR ucReserved2;
-}ATOM_GPIO_I2C_ASSIGMENT;
-
-typedef struct _ATOM_GPIO_I2C_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
-}ATOM_GPIO_I2C_INFO;
-
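Decoding sucI2cId per the ucI2C_ConfigID layout above works through either the bitfield view or raw masks on ucAccess; a minimal sketch (helper names illustrative):

static int i2c_is_hw_assisted(ATOM_I2C_ID_CONFIG_ACCESS id)
{
   return id.sbfAccess.bfHW_Capable;   /* same as (id.ucAccess & 0x80) != 0 */
}
static UCHAR i2c_line_mux(ATOM_I2C_ID_CONFIG_ACCESS id)
{
   return id.sbfAccess.bfI2C_LineMux;  /* same as id.ucAccess & 0x0F */
}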
-/****************************************************************************/
-// Common Structure used in other structures
-/****************************************************************************/
-
-#ifndef _H2INC
-
-//Please don't add or expand this bitfield structure below; this one will retire soon!
-typedef struct _ATOM_MODE_MISC_INFO
-{
-#if ATOM_BIG_ENDIAN
-  USHORT Reserved:6;
-  USHORT RGB888:1;
-  USHORT DoubleClock:1;
-  USHORT Interlace:1;
-  USHORT CompositeSync:1;
-  USHORT V_ReplicationBy2:1;
-  USHORT H_ReplicationBy2:1;
-  USHORT VerticalCutOff:1;
-  USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
-  USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
-  USHORT HorizontalCutOff:1;
-#else
-  USHORT HorizontalCutOff:1;
-  USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
-  USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
-  USHORT VerticalCutOff:1;
-  USHORT H_ReplicationBy2:1;
-  USHORT V_ReplicationBy2:1;
-  USHORT CompositeSync:1;
-  USHORT Interlace:1;
-  USHORT DoubleClock:1;
-  USHORT RGB888:1;
-  USHORT Reserved:6;
-#endif
-}ATOM_MODE_MISC_INFO;
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS
-{
-  ATOM_MODE_MISC_INFO sbfAccess;
-  USHORT usAccess;
-}ATOM_MODE_MISC_INFO_ACCESS;
-
-#else
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS
-{
-  USHORT usAccess;
-}ATOM_MODE_MISC_INFO_ACCESS;
-
-#endif
-
-// usModeMiscInfo-
-#define ATOM_H_CUTOFF          0x01
-#define ATOM_HSYNC_POLARITY    0x02 //0=Active High, 1=Active Low
-#define ATOM_VSYNC_POLARITY    0x04 //0=Active High, 1=Active Low
-#define ATOM_V_CUTOFF          0x08
-#define ATOM_H_REPLICATIONBY2  0x10
-#define ATOM_V_REPLICATIONBY2  0x20
-#define ATOM_COMPOSITESYNC     0x40
-#define ATOM_INTERLACE         0x80
-#define ATOM_DOUBLE_CLOCK_MODE 0x100
-#define ATOM_RGB888_MODE       0x200
-
-//usRefreshRate-
-#define ATOM_REFRESH_43 43
-#define ATOM_REFRESH_47 47
-#define ATOM_REFRESH_56 56
-#define ATOM_REFRESH_60 60
-#define ATOM_REFRESH_65 65
-#define ATOM_REFRESH_70 70
-#define ATOM_REFRESH_72 72
-#define ATOM_REFRESH_75 75
-#define ATOM_REFRESH_85 85
-
-// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
-// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
-//
-// VESA_HTOTAL      = VESA_ACTIVE + 2 * VESA_BORDER + VESA_BLANK
-//                  = EDID_HA + EDID_HBL
-// VESA_HDISP       = VESA_ACTIVE = EDID_HA
-// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
-//                  = EDID_HA + EDID_HSO
-// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
-// VESA_BORDER      = EDID_BORDER
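A worked instance of the horizontal half of the translation above; edid_ha/hbl/hso/hspw stand in for the EDID detailed-timing fields and are illustrative names, not symbols from this header:

static void edid_to_vesa_h(USHORT edid_ha, USHORT edid_hbl,
                           USHORT edid_hso, USHORT edid_hspw,
                           USHORT *h_total, USHORT *h_disp,
                           USHORT *h_sync_start, USHORT *h_sync_width)
{
   *h_total      = edid_ha + edid_hbl; /* VESA_HTOTAL      */
   *h_disp       = edid_ha;            /* VESA_HDISP       */
   *h_sync_start = edid_ha + edid_hso; /* VESA_HSYNC_START */
   *h_sync_width = edid_hspw;          /* VESA_HSYNC_WIDTH */
}

The vertical fields follow the same pattern with the EDID VA/VBL/VSO/VSPW values.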
-
-
-/****************************************************************************/
-// Structure used in SetCRTC_UsingDTDTimingTable
-/****************************************************************************/
-typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
-{
-  USHORT usH_Size;
-  USHORT usH_Blanking_Time;
-  USHORT usV_Size;
-  USHORT usV_Blanking_Time;
-  USHORT usH_SyncOffset;
-  USHORT usH_SyncWidth;
-  USHORT usV_SyncOffset;
-  USHORT usV_SyncWidth;
-  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
-  UCHAR ucH_Border;   // From DFP EDID
-  UCHAR ucV_Border;
-  UCHAR ucCRTC;       // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR ucPadding[3];
-}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
-
-/****************************************************************************/
-// Structure used in SetCRTC_TimingTable
-/****************************************************************************/
-typedef struct _SET_CRTC_TIMING_PARAMETERS
-{
-  USHORT usH_Total;     // horizontal total
-  USHORT usH_Disp;      // horizontal display
-  USHORT usH_SyncStart; // horizontal Sync start
-  USHORT usH_SyncWidth; // horizontal Sync width
-  USHORT usV_Total;     // vertical total
-  USHORT usV_Disp;      // vertical display
-  USHORT usV_SyncStart; // vertical Sync start
-  USHORT usV_SyncWidth; // vertical Sync width
-  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
-  UCHAR ucCRTC;           // ATOM_CRTC1 or ATOM_CRTC2
-  UCHAR ucOverscanRight;  // right
-  UCHAR ucOverscanLeft;   // left
-  UCHAR ucOverscanBottom; // bottom
-  UCHAR ucOverscanTop;    // top
-  UCHAR ucReserved;
-}SET_CRTC_TIMING_PARAMETERS;
-#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
-
-
-/****************************************************************************/
-// Structure used in StandardVESA_TimingTable
-//                   AnalogTV_InfoTable
-//                   ComponentVideoInfoTable
-/****************************************************************************/
-typedef struct _ATOM_MODE_TIMING
-{
-  USHORT usCRTC_H_Total;
-  USHORT usCRTC_H_Disp;
-  USHORT usCRTC_H_SyncStart;
-  USHORT usCRTC_H_SyncWidth;
-  USHORT usCRTC_V_Total;
-  USHORT usCRTC_V_Disp;
-  USHORT usCRTC_V_SyncStart;
-  USHORT usCRTC_V_SyncWidth;
-  USHORT usPixelClock; //in 10Khz unit
-  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
-  USHORT usCRTC_OverscanRight;
-  USHORT usCRTC_OverscanLeft;
-  USHORT usCRTC_OverscanBottom;
-  USHORT usCRTC_OverscanTop;
-  USHORT usReserve;
-  UCHAR ucInternalModeNumber;
-  UCHAR ucRefreshRate;
-}ATOM_MODE_TIMING;
-
-typedef struct _ATOM_DTD_FORMAT
-{
-  USHORT usPixClk;
-  USHORT usHActive;
-  USHORT usHBlanking_Time;
-  USHORT usVActive;
-  USHORT usVBlanking_Time;
-  USHORT usHSyncOffset;
-  USHORT usHSyncWidth;
-  USHORT usVSyncOffset;
-  USHORT usVSyncWidth;
-  USHORT usImageHSize;
-  USHORT usImageVSize;
-  UCHAR ucHBorder;
-  UCHAR ucVBorder;
-  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
-  UCHAR ucInternalModeNumber;
-  UCHAR ucRefreshRate;
-}ATOM_DTD_FORMAT;
-
-/****************************************************************************/
-// Structure used in LVDS_InfoTable
-//  * Need a document to describe this table
-/****************************************************************************/
-#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
-#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
-#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
-#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-#define SUPPORTED_LCD_REFRESHRATE_48Hz 0x0040
-
-//ucTableFormatRevision=1
-//ucTableContentRevision=1
-typedef struct _ATOM_LVDS_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_DTD_FORMAT sLCDTiming;
-  USHORT usModePatchTableOffset;
-  USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
-  USHORT usOffDelayInMs;
-  UCHAR ucPowerSequenceDigOntoDEin10Ms;
-  UCHAR ucPowerSequenceDEtoBLOnin10Ms;
-  UCHAR ucLVDS_Misc;  // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
-                      // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
-                      // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
-                      // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
-  UCHAR ucPanelDefaultRefreshRate;
-  UCHAR ucPanelIdentification;
-  UCHAR ucSS_Id;
-}ATOM_LVDS_INFO;
-
-//ucTableFormatRevision=1
-//ucTableContentRevision=2
-typedef struct _ATOM_LVDS_INFO_V12
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_DTD_FORMAT sLCDTiming;
-  USHORT usExtInfoTableOffset;
-  USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
-  USHORT usOffDelayInMs;
-  UCHAR ucPowerSequenceDigOntoDEin10Ms;
-  UCHAR ucPowerSequenceDEtoBLOnin10Ms;
-  UCHAR ucLVDS_Misc;  // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
-                      // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
-                      // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
-                      // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
-  UCHAR ucPanelDefaultRefreshRate;
-  UCHAR ucPanelIdentification;
-  UCHAR ucSS_Id;
-  USHORT usLCDVenderID;
-  USHORT usLCDProductID;
-  UCHAR ucLCDPanel_SpecialHandlingCap;
-  UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
-  UCHAR ucReserved[2];
-}ATOM_LVDS_INFO_V12;
-
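A small sketch decoding ucLVDS_Misc per the bit comments above; the macro names are illustrative, not defined in this header:

#define LVDS_MISC_DUAL_LINK(m)  (((m) & 0x01) != 0) /* Bit0: 0=single, 1=dual   */
#define LVDS_MISC_888RGB(m)     (((m) & 0x02) != 0) /* Bit1: 0=666RGB, 1=888RGB */
#define LVDS_MISC_GREY_LEVEL(m) (((m) >> 2) & 0x03) /* Bit3:2: grey level       */
#define LVDS_MISC_FPDI(m)       (((m) & 0x10) != 0) /* Bit4: FPDI for RGB888    */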
-//Definitions for ucLCDPanel_SpecialHandlingCap:
-
-//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
-//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
-#define LCDPANEL_CAP_READ_EDID 0x1
-
-//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
-//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
-//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
-#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
-
-//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
-#define LCDPANEL_CAP_eDP 0x4
-
-
-//Color Bit Depth definition in EDID V1.4 @BYTE 14h
-//Bit 6 5 4
-//    0 0 0 - Color bit depth is undefined
-//    0 0 1 - 6 Bits per Primary Color
-//    0 1 0 - 8 Bits per Primary Color
-//    0 1 1 - 10 Bits per Primary Color
-//    1 0 0 - 12 Bits per Primary Color
-//    1 0 1 - 14 Bits per Primary Color
-//    1 1 0 - 16 Bits per Primary Color
-//    1 1 1 - Reserved
-
-#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
-
-// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
-#define PANEL_RANDOM_DITHER      0x80
-#define PANEL_RANDOM_DITHER_MASK 0x80
-
-#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
-
-
-typedef struct _ATOM_LCD_REFRESH_RATE_SUPPORT
-{
-  UCHAR ucSupportedRefreshRate;
-  UCHAR ucMinRefreshRateForDRR;
-}ATOM_LCD_REFRESH_RATE_SUPPORT;
-
-/****************************************************************************/
-// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
-// ASIC Families: NI
-// ucTableFormatRevision=1
-// ucTableContentRevision=3
-/****************************************************************************/
-typedef struct _ATOM_LCD_INFO_V13
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_DTD_FORMAT sLCDTiming;
-  USHORT usExtInfoTableOffset;
-  union
-  {
-    USHORT usSupportedRefreshRate;
-    ATOM_LCD_REFRESH_RATE_SUPPORT sRefreshRateSupport;
-  };
-  ULONG ulReserved0;
-  UCHAR ucLCD_Misc;   // Reorganized in V13
-                      // Bit0: {=0:single, =1:dual},
-                      // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
-                      // Bit3:2: {Grey level}
-                      // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
-                      // Bit7   Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
-  UCHAR ucPanelDefaultRefreshRate;
-  UCHAR ucPanelIdentification;
-  UCHAR ucSS_Id;
-  USHORT usLCDVenderID;
-  USHORT usLCDProductID;
-  UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
-                      // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
-                      // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
-                      // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
-                      // Bit7-3: Reserved
-  UCHAR ucPanelInfoSize;  // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
-  USHORT usBacklightPWM;  // Backlight PWM in Hz. New in _V13
-
-  UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
-  UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
-  UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
-  UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
-
-  UCHAR ucOffDelay_in4Ms;
-  UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
-  UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
-  UCHAR ucReserved1;
-
-  UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
-  UCHAR ucDPCD_MAX_LINK_RATE;         // dpcd 01h
-  UCHAR ucDPCD_MAX_LANE_COUNT;        // dpcd 02h
-  UCHAR ucDPCD_MAX_DOWNSPREAD;        // dpcd 03h
-
-  USHORT usMaxPclkFreqInSingleLink;   // Max. PixelClock frequency in single link mode.
-  UCHAR uceDPToLVDSRxId;
-  UCHAR ucLcdReservd;
-  ULONG ulReserved[2];
-}ATOM_LCD_INFO_V13;
-
-#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
-
-//Definitions for ucLCD_Misc
-#define ATOM_PANEL_MISC_V13_DUAL                 0x00000001
-#define ATOM_PANEL_MISC_V13_FPDI                 0x00000002
-#define ATOM_PANEL_MISC_V13_GREY_LEVEL           0x0000000C
-#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT     2
-#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
-#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR       0x10
-#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR       0x20
-
-//Color Bit Depth definition in EDID V1.4 @BYTE 14h
-//Bit 6 5 4
-//    0 0 0 - Color bit depth is undefined
-//    0 0 1 - 6 Bits per Primary Color
-//    0 1 0 - 8 Bits per Primary Color
-//    0 1 1 - 10 Bits per Primary Color
-//    1 0 0 - 12 Bits per Primary Color
-//    1 0 1 - 14 Bits per Primary Color
-//    1 1 0 - 16 Bits per Primary Color
-//    1 1 1 - Reserved
-
-//Definitions for ucLCDPanel_SpecialHandlingCap:
-
-//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
-//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
-#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID; no change compared to previous version
-
-//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
-//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
-//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
-#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED; no change compared to previous version
-
-//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
-#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP; no change compared to previous version
-
-//uceDPToLVDSRxId
-#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
-#define eDP_TO_LVDS_COMMON_ID  0x01 // common eDP->LVDS translator chip without AMD SW init
-#define eDP_TO_LVDS_RT_ID      0x02 // RT translator which requires AMD SW init
-
-typedef struct _ATOM_PATCH_RECORD_MODE
-{
-  UCHAR ucRecordType;
-  USHORT usHDisp;
-  USHORT usVDisp;
-}ATOM_PATCH_RECORD_MODE;
-
-typedef struct _ATOM_LCD_RTS_RECORD
-{
-  UCHAR ucRecordType;
-  UCHAR ucRTSValue;
-}ATOM_LCD_RTS_RECORD;
-
-//!! If the record below exists, it should always be the first record for easy use in command table!!!
-// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
-typedef struct _ATOM_LCD_MODE_CONTROL_CAP
-{
-  UCHAR ucRecordType;
-  USHORT usLCDCap;
-}ATOM_LCD_MODE_CONTROL_CAP;
-
-#define LCD_MODE_CAP_BL_OFF    1
-#define LCD_MODE_CAP_CRTC_OFF  2
-#define LCD_MODE_CAP_PANEL_OFF 4
-
-
-typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
-{
-  UCHAR ucRecordType;
-  UCHAR ucFakeEDIDLength;    // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128
-  UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
-} ATOM_FAKE_EDID_PATCH_RECORD;
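The length encoding above is irregular: the value 128 means exactly 128 bytes, while any other value is a multiple of 128. A worked decode (function name illustrative):

static ULONG fake_edid_length(const ATOM_FAKE_EDID_PATCH_RECORD *rec)
{
   /* e.g. ucFakeEDIDLength == 2 => 256 bytes; == 128 => 128 bytes */
   return (rec->ucFakeEDIDLength == 128) ? 128
                                         : (ULONG)rec->ucFakeEDIDLength * 128;
}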
-
-typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
-{
-  UCHAR ucRecordType;
-  USHORT usHSize;
-  USHORT usVSize;
-}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
-
-#define LCD_MODE_PATCH_RECORD_MODE_TYPE   1
-#define LCD_RTS_RECORD_TYPE               2
-#define LCD_CAP_RECORD_TYPE               3
-#define LCD_FAKE_EDID_PATCH_RECORD_TYPE   4
-#define LCD_PANEL_RESOLUTION_RECORD_TYPE  5
-#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
-#define ATOM_RECORD_END_TYPE              0xFF
-
-/****************************Spread Spectrum Info Table Definitions **********************/
-
-//ucTableFormatRevision=1
-//ucTableContentRevision=2
-typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
-{
-  USHORT usSpreadSpectrumPercentage;
-  UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 internal PPLL SS Others:TBD
-  UCHAR ucSS_Step;
-  UCHAR ucSS_Delay;
-  UCHAR ucSS_Id;
-  UCHAR ucRecommendedRef_Div;
-  UCHAR ucSS_Range;           //it was reserved for V11
-}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
-
-#define ATOM_MAX_SS_ENTRY         16
-#define ATOM_DP_SS_ID1            0x0f1 // SS ID for internal DP stream at 2.7Ghz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
-#define ATOM_DP_SS_ID2            0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
-#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
-#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
-
-
-
-#define ATOM_SS_DOWN_SPREAD_MODE_MASK   0x00000000
-#define ATOM_SS_DOWN_SPREAD_MODE        0x00000000
-#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
-#define ATOM_SS_CENTRE_SPREAD_MODE      0x00000001
-#define ATOM_INTERNAL_SS_MASK           0x00000000
-#define ATOM_EXTERNAL_SS_MASK           0x00000002
-#define EXEC_SS_STEP_SIZE_SHIFT         2
-#define EXEC_SS_DELAY_SHIFT             4
-#define ACTIVEDATA_TO_BLON_DELAY_SHIFT  4
-
-typedef struct _ATOM_SPREAD_SPECTRUM_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
-}ATOM_SPREAD_SPECTRUM_INFO;
-
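A minimal sketch decoding ucSpreadSpectrumType with the masks above (function names illustrative; the masks themselves confirm that down/center spread is bit 0 and internal/external is bit 1):

static int ss_is_center_spread(UCHAR type)
{
   return (type & ATOM_SS_CENTRE_SPREAD_MODE_MASK) == ATOM_SS_CENTRE_SPREAD_MODE;
}
static int ss_is_external(UCHAR type)
{
   return (type & ATOM_EXTERNAL_SS_MASK) != 0;
}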
Bit 7 = ENHANCED_FRAME_CAP - UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec) -}ATOM_DPCD_INFO; - -#define ATOM_DPCD_MAX_LANE_MASK 0x1F - -/**************************************************************************/ -// VRAM usage and their defintions - -// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. -// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. -// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! -// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR -// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX - -// Moved VESA_MEMORY_IN_64K_BLOCK definition to "AtomConfig.h" so that it can be redefined in design (SKU). -//#ifndef VESA_MEMORY_IN_64K_BLOCK -//#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!) -//#endif - -#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes -#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes -#define ATOM_HWICON_INFOTABLE_SIZE 32 -#define MAX_DTD_MODE_IN_VRAM 6 -#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) -#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) -//20 bytes for Encoder Type and DPCD in STD EDID area -#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) -#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) - -#define ATOM_HWICON1_SURFACE_ADDR 0 -#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) -#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) -#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE) -#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define 
ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) - -#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) -#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 - -//The size below is in Kb! -#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) - -#define ATOM_VRAM_RESERVE_V2_SIZE 32 - -#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L -#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 -#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 -#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 - -/***********************************************************************************/ -// Structure used in VRAM_UsageByFirmwareTable -// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm -// at running time. -// note2: From RV770, the memory is more than 32bit addressable, so we will change -// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains -// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware -// (in offset to start of memory address) is KB aligned instead of byte aligend. 
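The ATOM_VRAM_RESERVE_SIZE expression above packs two steps into one line; unfolded (the helper name is illustrative, the macros are the ones defined above):

static unsigned int atom_vram_reserve_kb(void)
{
    /* Size of the span the VBIOS reserves, from the first HW icon
     * surface through the end of stack storage. */
    unsigned int span_bytes = ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR;
    unsigned int span_kb = span_bytes >> 10;   /* bytes -> KB */

    /* +4 then mask rounds up to a multiple of 4 KB, so any partial
     * KB at the end of the span stays covered. */
    return (span_kb + 4) & 0xFFFC;
}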
-// Note3: -/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged -constant across VGA or non VGA adapter, -for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have: - -If (ulStartAddrUsedByFirmware!=0) -FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB; -Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose -else //Non VGA case - if (FB_Size<=2Gb) - FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB; - else - FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB - -CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/ - -/***********************************************************************************/ -#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 - -typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO -{ - ULONG ulStartAddrUsedByFirmware; - USHORT usFirmwareUseInKb; - USHORT usReserved; -}ATOM_FIRMWARE_VRAM_RESERVE_INFO; - -typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; -}ATOM_VRAM_USAGE_BY_FIRMWARE; - -// change verion to 1.5, when allow driver to allocate the vram area for command table access. -typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 -{ - ULONG ulStartAddrUsedByFirmware; - USHORT usFirmwareUseInKb; - USHORT usFBUsedByDrvInKb; -}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5; - -typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; -}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5; - -/****************************************************************************/ -// Structure used in GPIO_Pin_LUTTable -/****************************************************************************/ -typedef struct _ATOM_GPIO_PIN_ASSIGNMENT -{ - USHORT usGpioPin_AIndex; - UCHAR ucGpioPinBitShift; - UCHAR ucGPIO_ID; -}ATOM_GPIO_PIN_ASSIGNMENT; - -//ucGPIO_ID pre-define id for multiple usage -// GPIO use to control PCIE_VDDC in certain SLT board -#define PCIE_VDDC_CONTROL_GPIO_PINID 56 - -//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable -#define PP_AC_DC_SWITCH_GPIO_PINID 60 -//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable -#define VDDC_VRHOT_GPIO_PINID 61 -//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled -#define VDDC_PCC_GPIO_PINID 62 -// Only used on certain SLT/PA board to allow utility to cut Efuse. 
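Note3 above is effectively pseudocode; a direct C rendering may read more clearly. A sketch only: the parameter names are invented here, and all quantities are assumed to be in KB, matching usFBUsedbyDrvInKB and the KB-aligned ulStartAddrUsedByFirmware from note2.

static unsigned long cail_fb_access_offset_kb(unsigned long start_used_by_fw_kb,
                                              unsigned long fb_used_by_drv_kb,
                                              unsigned long fb_size_kb,
                                              unsigned long aper_size_kb)
{
    if (start_used_by_fw_kb != 0)
        /* VGA case: VBIOS has already claimed the reserved area,
         * including the FB access area. */
        return start_used_by_fw_kb - fb_used_by_drv_kb;

    /* Non-VGA case. */
    if (fb_size_kb <= 2UL * 1024 * 1024)   /* FB <= 2 GB */
        return fb_size_kb - fb_used_by_drv_kb;
    return aper_size_kb - fb_used_by_drv_kb;
}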
-#define EFUSE_CUT_ENABLE_GPIO_PINID 63 -// ucGPIO=DRAM_SELF_REFRESH_GPIO_PIND uses for memory self refresh (ucGPIO=0, DRAM self-refresh; ucGPIO= -#define DRAM_SELF_REFRESH_GPIO_PINID 64 -// Thermal interrupt output->system thermal chip GPIO pin -#define THERMAL_INT_OUTPUT_GPIO_PINID 65 - - -typedef struct _ATOM_GPIO_PIN_LUT -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; -}ATOM_GPIO_PIN_LUT; - -/****************************************************************************/ -// Structure used in ComponentVideoInfoTable -/****************************************************************************/ -#define GPIO_PIN_ACTIVE_HIGH 0x1 -#define MAX_SUPPORTED_CV_STANDARDS 5 - -// definitions for ATOM_D_INFO.ucSettings -#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0] -#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out -#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7] - -typedef struct _ATOM_GPIO_INFO -{ - USHORT usAOffset; - UCHAR ucSettings; - UCHAR ucReserved; -}ATOM_GPIO_INFO; - -// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) -#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 - -// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i -#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7]; -#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0] - -// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode -//Line 3 out put 5V. -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9 -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9 -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 - -//Line 3 out put 2.2V -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 - -//Line 3 out put 0V -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3 -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3 -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 - -#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0] - -#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7 - -//GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. -#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. -#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. - - -typedef struct _ATOM_COMPONENT_VIDEO_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMask_PinRegisterIndex; - USHORT usEN_PinRegisterIndex; - USHORT usY_PinRegisterIndex; - USHORT usA_PinRegisterIndex; - UCHAR ucBitShift; - UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low - ATOM_DTD_FORMAT sReserved; // must be zeroed out - UCHAR ucMiscInfo; - UCHAR uc480i; - UCHAR uc480p; - UCHAR uc720p; - UCHAR uc1080i; - UCHAR ucLetterBoxMode; - UCHAR ucReserved[3]; - UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. 
If zere, NTSC type connector - ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; -}ATOM_COMPONENT_VIDEO_INFO; - -//ucTableFormatRevision=2 -//ucTableContentRevision=1 -typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucMiscInfo; - UCHAR uc480i; - UCHAR uc480p; - UCHAR uc720p; - UCHAR uc1080i; - UCHAR ucReserved; - UCHAR ucLetterBoxMode; - UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector - ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; -}ATOM_COMPONENT_VIDEO_INFO_V21; - -#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 - -/****************************************************************************/ -// Structure used in object_InfoTable -/****************************************************************************/ -typedef struct _ATOM_OBJECT_HEADER -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - USHORT usConnectorObjectTableOffset; - USHORT usRouterObjectTableOffset; - USHORT usEncoderObjectTableOffset; - USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. - USHORT usDisplayPathTableOffset; -}ATOM_OBJECT_HEADER; - -typedef struct _ATOM_OBJECT_HEADER_V3 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - USHORT usConnectorObjectTableOffset; - USHORT usRouterObjectTableOffset; - USHORT usEncoderObjectTableOffset; - USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. - USHORT usDisplayPathTableOffset; - USHORT usMiscObjectTableOffset; -}ATOM_OBJECT_HEADER_V3; - - -typedef struct _ATOM_DISPLAY_OBJECT_PATH -{ - USHORT usDeviceTag; //supported device - USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH - USHORT usConnObjectId; //Connector Object ID - USHORT usGPUObjectId; //GPU ID - USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
-}ATOM_DISPLAY_OBJECT_PATH; - -typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH -{ - USHORT usDeviceTag; //supported device - USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH - USHORT usConnObjectId; //Connector Object ID - USHORT usGPUObjectId; //GPU ID - USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder -}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH; - -typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE -{ - UCHAR ucNumOfDispPath; - UCHAR ucVersion; - UCHAR ucPadding[2]; - ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; -}ATOM_DISPLAY_OBJECT_PATH_TABLE; - -typedef struct _ATOM_OBJECT //each object has this structure -{ - USHORT usObjectID; - USHORT usSrcDstTableOffset; - USHORT usRecordOffset; //this pointing to a bunch of records defined below - USHORT usReserved; -}ATOM_OBJECT; - -typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure -{ - UCHAR ucNumberOfObjects; - UCHAR ucPadding[3]; - ATOM_OBJECT asObjects[1]; -}ATOM_OBJECT_TABLE; - -typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure -{ - UCHAR ucNumberOfSrc; - USHORT usSrcObjectID[1]; - UCHAR ucNumberOfDst; - USHORT usDstObjectID[1]; -}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; - - -//Two definitions below are for OPM on MXM module designs - -#define EXT_HPDPIN_LUTINDEX_0 0 -#define EXT_HPDPIN_LUTINDEX_1 1 -#define EXT_HPDPIN_LUTINDEX_2 2 -#define EXT_HPDPIN_LUTINDEX_3 3 -#define EXT_HPDPIN_LUTINDEX_4 4 -#define EXT_HPDPIN_LUTINDEX_5 5 -#define EXT_HPDPIN_LUTINDEX_6 6 -#define EXT_HPDPIN_LUTINDEX_7 7 -#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1) - -#define EXT_AUXDDC_LUTINDEX_0 0 -#define EXT_AUXDDC_LUTINDEX_1 1 -#define EXT_AUXDDC_LUTINDEX_2 2 -#define EXT_AUXDDC_LUTINDEX_3 3 -#define EXT_AUXDDC_LUTINDEX_4 4 -#define EXT_AUXDDC_LUTINDEX_5 5 -#define EXT_AUXDDC_LUTINDEX_6 6 -#define EXT_AUXDDC_LUTINDEX_7 7 -#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) - -//ucChannelMapping are defined as following -//for DP connector, eDP, DP to VGA/LVDS -//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING -{ -#if ATOM_BIG_ENDIAN - UCHAR ucDP_Lane3_Source:2; - UCHAR ucDP_Lane2_Source:2; - UCHAR ucDP_Lane1_Source:2; - UCHAR ucDP_Lane0_Source:2; -#else - UCHAR ucDP_Lane0_Source:2; - UCHAR ucDP_Lane1_Source:2; - UCHAR ucDP_Lane2_Source:2; - UCHAR ucDP_Lane3_Source:2; -#endif -}ATOM_DP_CONN_CHANNEL_MAPPING; - -//for DVI/HDMI, in dual link case, both links have to have same mapping. 
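The DP channel-mapping byte above is a plain packed bitfield, two bits per lane; extracting one lane's source looks like this (hypothetical helper, not part of the header), and the DVI/HDMI mapping spelled out next follows the same two-bit-per-lane pattern:

static unsigned char dp_lane_tx_source(unsigned char ucChannelMapping,
                                       unsigned int lane)  /* lane 0..3 */
{
    /* Result 0..3 selects GPU pin TX0..TX3 for this DP lane. */
    return (ucChannelMapping >> (2 * lane)) & 0x3;
}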
-//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 -typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING -{ -#if ATOM_BIG_ENDIAN - UCHAR ucDVI_CLK_Source:2; - UCHAR ucDVI_DATA0_Source:2; - UCHAR ucDVI_DATA1_Source:2; - UCHAR ucDVI_DATA2_Source:2; -#else - UCHAR ucDVI_DATA2_Source:2; - UCHAR ucDVI_DATA1_Source:2; - UCHAR ucDVI_DATA0_Source:2; - UCHAR ucDVI_CLK_Source:2; -#endif -}ATOM_DVI_CONN_CHANNEL_MAPPING; - -typedef struct _EXT_DISPLAY_PATH -{ - USHORT usDeviceTag; //A bit vector to show what devices are supported - USHORT usDeviceACPIEnum; //16bit device ACPI id. - USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions - UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT - UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT - USHORT usExtEncoderObjId; //external encoder object id - union{ - UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping - ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping; - ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping; - }; - UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted - USHORT usCaps; - USHORT usReserved; -}EXT_DISPLAY_PATH; - -#define NUMBER_OF_UCHAR_FOR_GUID 16 -#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7 - -//usCaps -#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01 -#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02 -#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 0x04 -#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT 0x08 - -typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string - EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. - UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. 
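The ucChecksum convention just stated (the structure's remaining fields continue below) means the byte sum of the entire table, checksum included, must be zero modulo 256; a verification sketch with a hypothetical function name:

static int ext_display_table_checksum_ok(const unsigned char *tbl,
                                         unsigned int size_bytes)
{
    unsigned char sum = 0;
    unsigned int i;

    /* Sum every byte of the structure; a valid table sums to 0x0. */
    for (i = 0; i < size_bytes; i++)
        sum += tbl[i];
    return sum == 0;
}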
- UCHAR uc3DStereoPinId; // use for eDP panel
- UCHAR ucRemoteDisplayConfig;
- UCHAR uceDPToLVDSRxId;
- UCHAR ucFixDPVoltageSwing; // usCaps[1]=1: this indicates the DP_LANE_SET value
- UCHAR Reserved[3]; // for potential expansion
-}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
-
-//Related definitions; all records are different, but they share a common header.
-typedef struct _ATOM_COMMON_RECORD_HEADER
-{
- UCHAR ucRecordType; //An enum to indicate the record type
- UCHAR ucRecordSize; //The size of the whole record in bytes
-}ATOM_COMMON_RECORD_HEADER;
-
-
-#define ATOM_I2C_RECORD_TYPE 1
-#define ATOM_HPD_INT_RECORD_TYPE 2
-#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
-#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
-#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
-#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
-#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
-#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
-#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
-#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
-#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
-#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
-#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
-#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to the object table
-#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to the object table
-#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
-#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
-#define ATOM_ENCODER_CAP_RECORD_TYPE 20
-#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
-
-
-//Must be updated when a new record type is added; keep it equal to that record's definition!
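Because every record begins with ATOM_COMMON_RECORD_HEADER and the list ends at ATOM_RECORD_END_TYPE (0xFF, defined earlier), a consumer can walk an object's record list generically; a minimal sketch, assuming the start pointer has already been derived from an object's usRecordOffset (the ATOM_MAX_OBJECT_RECORD_NUMBER define follows below):

static void walk_object_records(unsigned char *rec)
{
    /* rec[0] = ucRecordType, rec[1] = ucRecordSize (whole record,
     * header included). A zero size would never advance, so treat
     * it as a terminator too. */
    while (rec[0] != 0xFF /* ATOM_RECORD_END_TYPE */ && rec[1] != 0) {
        /* ... dispatch on rec[0] (ATOM_I2C_RECORD_TYPE, ...) ... */
        rec += rec[1];   /* records are variable length */
    }
}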
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE - -typedef struct _ATOM_I2C_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - ATOM_I2C_ID_CONFIG sucI2cId; - UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC -}ATOM_I2C_RECORD; - -typedef struct _ATOM_HPD_INT_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info - UCHAR ucPlugged_PinState; -}ATOM_HPD_INT_RECORD; - - -typedef struct _ATOM_OUTPUT_PROTECTION_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucProtectionFlag; - UCHAR ucReserved; -}ATOM_OUTPUT_PROTECTION_RECORD; - -typedef struct _ATOM_CONNECTOR_DEVICE_TAG -{ - ULONG ulACPIDeviceEnum; //Reserved for now - USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT" - USHORT usPadding; -}ATOM_CONNECTOR_DEVICE_TAG; - -typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucNumberOfDevice; - UCHAR ucReserved; - ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation -}ATOM_CONNECTOR_DEVICE_TAG_RECORD; - - -typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucConfigGPIOID; - UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in - UCHAR ucFlowinGPIPID; - UCHAR ucExtInGPIPID; -}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; - -typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucCTL1GPIO_ID; - UCHAR ucCTL1GPIOState; //Set to 1 when it's active high - UCHAR ucCTL2GPIO_ID; - UCHAR ucCTL2GPIOState; //Set to 1 when it's active high - UCHAR ucCTL3GPIO_ID; - UCHAR ucCTL3GPIOState; //Set to 1 when it's active high - UCHAR ucCTLFPGA_IN_ID; - UCHAR ucPadding[3]; -}ATOM_ENCODER_FPGA_CONTROL_RECORD; - -typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info - UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected -}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; - -typedef struct _ATOM_JTAG_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucTMSGPIO_ID; - UCHAR ucTMSGPIOState; //Set to 1 when it's active high - UCHAR ucTCKGPIO_ID; - UCHAR ucTCKGPIOState; //Set to 1 when it's active high - UCHAR ucTDOGPIO_ID; - UCHAR ucTDOGPIOState; //Set to 1 when it's active high - UCHAR ucTDIGPIO_ID; - UCHAR ucTDIGPIOState; //Set to 1 when it's active high - UCHAR ucPadding[2]; -}ATOM_JTAG_RECORD; - - -//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually -typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR -{ - UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table - UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin -}ATOM_GPIO_PIN_CONTROL_PAIR; - -typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucFlags; // Future expnadibility - UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object - ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins -}ATOM_OBJECT_GPIO_CNTL_RECORD; - -//Definitions for GPIO pin state -#define GPIO_PIN_TYPE_INPUT 0x00 -#define GPIO_PIN_TYPE_OUTPUT 0x10 -#define GPIO_PIN_TYPE_HW_CONTROL 0x20 - -//For GPIO_PIN_TYPE_OUTPUT the following is defined -#define 
GPIO_PIN_OUTPUT_STATE_MASK 0x01 -#define GPIO_PIN_OUTPUT_STATE_SHIFT 0 -#define GPIO_PIN_STATE_ACTIVE_LOW 0x0 -#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1 - -// Indexes to GPIO array in GLSync record -// GLSync record is for Frame Lock/Gen Lock feature. -#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0 -#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1 -#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2 -#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3 -#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4 -#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5 -#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6 -#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7 -#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8 -#define ATOM_GPIO_INDEX_GLSYNC_MAX 9 - -typedef struct _ATOM_ENCODER_DVO_CF_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - ULONG ulStrengthControl; // DVOA strength control for CF - UCHAR ucPadding[2]; -}ATOM_ENCODER_DVO_CF_RECORD; - -// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap -#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder -#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled -#define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN 0x04 // HDMI2.0 6Gbps enable or not. - -typedef struct _ATOM_ENCODER_CAP_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - union { - USHORT usEncoderCap; - struct { -#if ATOM_BIG_ENDIAN - USHORT usReserved:14; // Bit1-15 may be defined for other capability in future - USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable - USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. -#else - USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. - USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable - USHORT usReserved:14; // Bit1-15 may be defined for other capability in future -#endif - }; - }; -}ATOM_ENCODER_CAP_RECORD; - -// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle -#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 -#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 - -typedef struct _ATOM_CONNECTOR_CF_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - USHORT usMaxPixClk; - UCHAR ucFlowCntlGpioId; - UCHAR ucSwapCntlGpioId; - UCHAR ucConnectedDvoBundle; - UCHAR ucPadding; -}ATOM_CONNECTOR_CF_RECORD; - -typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - ATOM_DTD_FORMAT asTiming; -}ATOM_CONNECTOR_HARDCODE_DTD_RECORD; - -typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE - UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A - UCHAR ucReserved; -}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; - - -typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state - UCHAR ucMuxControlPin; - UCHAR ucMuxState[2]; //for alligment purpose -}ATOM_ROUTER_DDC_PATH_SELECT_RECORD; - -typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucMuxType; - UCHAR ucMuxControlPin; - UCHAR ucMuxState[2]; //for alligment purpose -}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; - -// define ucMuxType -#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f -#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01 - -typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR 
ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //An fixed size array which maps external pins to internal GPIO_PIN_INFO table -}ATOM_CONNECTOR_HPDPIN_LUT_RECORD; - -typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE -{ - ATOM_COMMON_RECORD_HEADER sheader; - ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //An fixed size array which maps external pins to internal DDC ID -}ATOM_CONNECTOR_AUXDDC_LUT_RECORD; - -typedef struct _ATOM_OBJECT_LINK_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - USHORT usObjectID; //could be connector, encorder or other object in object.h -}ATOM_OBJECT_LINK_RECORD; - -typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - USHORT usReserved; -}ATOM_CONNECTOR_REMOTE_CAP_RECORD; - -typedef struct _ATOM_CONNECTOR_LAYOUT_INFO -{ - USHORT usConnectorObjectId; - UCHAR ucConnectorType; - UCHAR ucPosition; -}ATOM_CONNECTOR_LAYOUT_INFO; - -// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size -#define CONNECTOR_TYPE_DVI_D 1 -#define CONNECTOR_TYPE_DVI_I 2 -#define CONNECTOR_TYPE_VGA 3 -#define CONNECTOR_TYPE_HDMI 4 -#define CONNECTOR_TYPE_DISPLAY_PORT 5 -#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6 - -typedef struct _ATOM_BRACKET_LAYOUT_RECORD -{ - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucLength; - UCHAR ucWidth; - UCHAR ucConnNum; - UCHAR ucReserved; - ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1]; -}ATOM_BRACKET_LAYOUT_RECORD; - - -/****************************************************************************/ -// Structure used in XXXX -/****************************************************************************/ -typedef struct _ATOM_VOLTAGE_INFO_HEADER -{ - USHORT usVDDCBaseLevel; //In number of 50mv unit - USHORT usReserved; //For possible extension table offset - UCHAR ucNumOfVoltageEntries; - UCHAR ucBytesPerVoltageEntry; - UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit - UCHAR ucDefaultVoltageEntry; - UCHAR ucVoltageControlI2cLine; - UCHAR ucVoltageControlAddress; - UCHAR ucVoltageControlOffset; -}ATOM_VOLTAGE_INFO_HEADER; - -typedef struct _ATOM_VOLTAGE_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_INFO_HEADER viHeader; - UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry -}ATOM_VOLTAGE_INFO; - - -typedef struct _ATOM_VOLTAGE_FORMULA -{ - USHORT usVoltageBaseLevel; // In number of 1mv unit - USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit - UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage - UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv - UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep - UCHAR ucReserved; - UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries -}ATOM_VOLTAGE_FORMULA; - -typedef struct _VOLTAGE_LUT_ENTRY -{ - USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code - USHORT usVoltageValue; // The corresponding Voltage Value, in mV -}VOLTAGE_LUT_ENTRY; - -typedef struct _ATOM_VOLTAGE_FORMULA_V2 -{ - UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage - UCHAR ucReserved[3]; - VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries -}ATOM_VOLTAGE_FORMULA_V2; - -typedef struct 
_ATOM_VOLTAGE_CONTROL -{ - UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine - UCHAR ucVoltageControlI2cLine; - UCHAR ucVoltageControlAddress; - UCHAR ucVoltageControlOffset; - USHORT usGpioPin_AIndex; //GPIO_PAD register index - UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff - UCHAR ucReserved; -}ATOM_VOLTAGE_CONTROL; - -// Define ucVoltageControlId -#define VOLTAGE_CONTROLLED_BY_HW 0x00 -#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F -#define VOLTAGE_CONTROLLED_BY_GPIO 0x80 -#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage -#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI -#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage -#define VOLTAGE_CONTROL_ID_DS4402 0x04 -#define VOLTAGE_CONTROL_ID_UP6266 0x05 -#define VOLTAGE_CONTROL_ID_SCORPIO 0x06 -#define VOLTAGE_CONTROL_ID_VT1556M 0x07 -#define VOLTAGE_CONTROL_ID_CHL822x 0x08 -#define VOLTAGE_CONTROL_ID_VT1586M 0x09 -#define VOLTAGE_CONTROL_ID_UP1637 0x0A -#define VOLTAGE_CONTROL_ID_CHL8214 0x0B -#define VOLTAGE_CONTROL_ID_UP1801 0x0C -#define VOLTAGE_CONTROL_ID_ST6788A 0x0D -#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E -#define VOLTAGE_CONTROL_ID_AD527x 0x0F -#define VOLTAGE_CONTROL_ID_NCP81022 0x10 -#define VOLTAGE_CONTROL_ID_LTC2635 0x11 -#define VOLTAGE_CONTROL_ID_NCP4208 0x12 -#define VOLTAGE_CONTROL_ID_IR35xx 0x13 -#define VOLTAGE_CONTROL_ID_RT9403 0x14 - -#define VOLTAGE_CONTROL_ID_GENERIC_I2C 0x40 - -typedef struct _ATOM_VOLTAGE_OBJECT -{ - UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI - UCHAR ucSize; //Size of Object - ATOM_VOLTAGE_CONTROL asControl; //describ how to control - ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID -}ATOM_VOLTAGE_OBJECT; - -typedef struct _ATOM_VOLTAGE_OBJECT_V2 -{ - UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI - UCHAR ucSize; //Size of Object - ATOM_VOLTAGE_CONTROL asControl; //describ how to control - ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID -}ATOM_VOLTAGE_OBJECT_V2; - -typedef struct _ATOM_VOLTAGE_OBJECT_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control -}ATOM_VOLTAGE_OBJECT_INFO; - -typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control -}ATOM_VOLTAGE_OBJECT_INFO_V2; - -typedef struct _ATOM_LEAKID_VOLTAGE -{ - UCHAR ucLeakageId; - UCHAR ucReserved; - USHORT usVoltage; -}ATOM_LEAKID_VOLTAGE; - -typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{ - UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI - UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase - USHORT usSize; //Size of Object -}ATOM_VOLTAGE_OBJECT_HEADER_V3; - -// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode -#define VOLTAGE_OBJ_GPIO_LUT 0 //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3 -#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3 -#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3 -#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3 -#define VOLTAGE_OBJ_EVV 8 -#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup 
table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-
-typedef struct _VOLTAGE_LUT_ENTRY_V2
-{
- ULONG ulVoltageId; // The Voltage ID which is used to program the GPIO register
- USHORT usVoltageValue; // The corresponding Voltage Value, in mV
-}VOLTAGE_LUT_ENTRY_V2;
-
-typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
-{
- USHORT usVoltageLevel; // The Voltage ID which is used to program the GPIO register
- USHORT usVoltageId;
- USHORT usLeakageId; // The corresponding Voltage Value, in mV
-}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
-
-
-typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
-{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
- UCHAR ucVoltageRegulatorId; //Indicates the Voltage Regulator Id
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
- UCHAR ucVoltageControlFlag; // Bit0: 0 - One byte data; 1 - Two byte data
- UCHAR ulReserved[3];
- VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // ends with 0xff
-}ATOM_I2C_VOLTAGE_OBJECT_V3;
-
-// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
-#define VOLTAGE_DATA_ONE_BYTE 0
-#define VOLTAGE_DATA_TWO_BYTE 1
-
-typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
-{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
- UCHAR ucVoltageGpioCntlId; // default is 0, which indicates control through CG VID mode
- UCHAR ucGpioEntryNum; // indicates the number of entries in the Voltage/GPIO value lookup table
- UCHAR ucPhaseDelay; // phase delay in units of microseconds
- UCHAR ucReserved;
- ULONG ulGpioMaskVal; // GPIO Mask value
- VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
-}ATOM_GPIO_VOLTAGE_OBJECT_V3;
-
-typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = 0x10/0x11/0x12
- UCHAR ucLeakageCntlId; // default is 0
- UCHAR ucLeakageEntryNum; // indicates the number of entries in the LeakageId/Voltage LUT
- UCHAR ucReserved[2];
- ULONG ulMaxVoltageLevel;
- LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
-}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
-
-
-typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
-{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
-// 14:7 - PSI0_VID
-// 6 - PSI0_EN
-// 5 - PSI1
-// 4:2 - load line slope trim.
-// 1:0 - offset trim.
- USHORT usLoadLine_PSI;
-// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31.
0 means GPIO0, 31 means GPIO31 - UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31 - UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31 - ULONG ulReserved; -}ATOM_SVID2_VOLTAGE_OBJECT_V3; - -typedef union _ATOM_VOLTAGE_OBJECT_V3{ - ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj; - ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj; - ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj; - ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj; -}ATOM_VOLTAGE_OBJECT_V3; - -typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control -}ATOM_VOLTAGE_OBJECT_INFO_V3_1; - - -typedef struct _ATOM_ASIC_PROFILE_VOLTAGE -{ - UCHAR ucProfileId; - UCHAR ucReserved; - USHORT usSize; - USHORT usEfuseSpareStartAddr; - USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, - ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and relatd voltage -}ATOM_ASIC_PROFILE_VOLTAGE; - -//ucProfileId -#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 -#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1 -#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2 - -typedef struct _ATOM_ASIC_PROFILING_INFO -{ - ATOM_COMMON_TABLE_HEADER asHeader; - ATOM_ASIC_PROFILE_VOLTAGE asVoltage; -}ATOM_ASIC_PROFILING_INFO; - -typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1 -{ - ATOM_COMMON_TABLE_HEADER asHeader; - UCHAR ucLeakageBinNum; // indicate the entry number of LeakageId/Voltage Lut table - USHORT usLeakageBinArrayOffset; // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher) - - UCHAR ucElbVDDC_Num; - USHORT usElbVDDC_IdArrayOffset; // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 ) - USHORT usElbVDDC_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array - - UCHAR ucElbVDDCI_Num; - USHORT usElbVDDCI_IdArrayOffset; // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 ) - USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array -}ATOM_ASIC_PROFILING_INFO_V2_1; - - -//Here is parameter to convert Efuse value to Measure value -//Measured = LN((2^Bitsize-1)/EFUSE-1)*(Range)/(-alpha)+(Max+Min)/2 -typedef struct _EFUSE_LOGISTIC_FUNC_PARAM -{ - USHORT usEfuseIndex; // Efuse Index in DWORD address, for example Index 911, usEuseIndex=112 - UCHAR ucEfuseBitLSB; // Efuse bit LSB in DWORD address, for example Index 911, usEfuseBitLSB= 911-112*8=15 - UCHAR ucEfuseLength; // Efuse bits length, - ULONG ulEfuseEncodeRange; // Range = Max - Min, bit31 indicate the efuse is negative number - ULONG ulEfuseEncodeAverage; // Average = ( Max + Min )/2 -}EFUSE_LOGISTIC_FUNC_PARAM; - -//Linear Function: Measured = Round ( Efuse * ( Max-Min )/(2^BitSize -1 ) + Min ) -typedef struct _EFUSE_LINEAR_FUNC_PARAM -{ - USHORT usEfuseIndex; // Efuse Index in DWORD address, for example Index 911, usEuseIndex=112 - UCHAR ucEfuseBitLSB; // Efuse bit LSB in DWORD address, for example Index 911, usEfuseBitLSB= 911-112*8=15 - UCHAR ucEfuseLength; // Efuse bits length, - ULONG ulEfuseEncodeRange; // Range = Max - Min, bit31 indicate the efuse is negative number - ULONG ulEfuseMin; // Min -}EFUSE_LINEAR_FUNC_PARAM; - - -typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1 -{ - ATOM_COMMON_TABLE_HEADER asHeader; - ULONG ulEvvDerateTdp; - ULONG ulEvvDerateTdc; - ULONG ulBoardCoreTemp; - ULONG ulMaxVddc; - ULONG ulMinVddc; - ULONG ulLoadLineSlop; - ULONG ulLeakageTemp; - ULONG ulLeakageVoltage; - EFUSE_LINEAR_FUNC_PARAM sCACm; - EFUSE_LINEAR_FUNC_PARAM sCACb; - EFUSE_LOGISTIC_FUNC_PARAM sKt_b; - EFUSE_LOGISTIC_FUNC_PARAM 
sKv_m; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b; - USHORT usLkgEuseIndex; - UCHAR ucLkgEfuseBitLSB; - UCHAR ucLkgEfuseLength; - ULONG ulLkgEncodeLn_MaxDivMin; - ULONG ulLkgEncodeMax; - ULONG ulLkgEncodeMin; - ULONG ulEfuseLogisticAlpha; - USHORT usPowerDpm0; - USHORT usCurrentDpm0; - USHORT usPowerDpm1; - USHORT usCurrentDpm1; - USHORT usPowerDpm2; - USHORT usCurrentDpm2; - USHORT usPowerDpm3; - USHORT usCurrentDpm3; - USHORT usPowerDpm4; - USHORT usCurrentDpm4; - USHORT usPowerDpm5; - USHORT usCurrentDpm5; - USHORT usPowerDpm6; - USHORT usCurrentDpm6; - USHORT usPowerDpm7; - USHORT usCurrentDpm7; -}ATOM_ASIC_PROFILING_INFO_V3_1; - - -typedef struct _ATOM_ASIC_PROFILING_INFO_V3_2 -{ - ATOM_COMMON_TABLE_HEADER asHeader; - ULONG ulEvvLkgFactor; - ULONG ulBoardCoreTemp; - ULONG ulMaxVddc; - ULONG ulMinVddc; - ULONG ulLoadLineSlop; - ULONG ulLeakageTemp; - ULONG ulLeakageVoltage; - EFUSE_LINEAR_FUNC_PARAM sCACm; - EFUSE_LINEAR_FUNC_PARAM sCACb; - EFUSE_LOGISTIC_FUNC_PARAM sKt_b; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b; - USHORT usLkgEuseIndex; - UCHAR ucLkgEfuseBitLSB; - UCHAR ucLkgEfuseLength; - ULONG ulLkgEncodeLn_MaxDivMin; - ULONG ulLkgEncodeMax; - ULONG ulLkgEncodeMin; - ULONG ulEfuseLogisticAlpha; - USHORT usPowerDpm0; - USHORT usPowerDpm1; - USHORT usPowerDpm2; - USHORT usPowerDpm3; - USHORT usPowerDpm4; - USHORT usPowerDpm5; - USHORT usPowerDpm6; - USHORT usPowerDpm7; - ULONG ulTdpDerateDPM0; - ULONG ulTdpDerateDPM1; - ULONG ulTdpDerateDPM2; - ULONG ulTdpDerateDPM3; - ULONG ulTdpDerateDPM4; - ULONG ulTdpDerateDPM5; - ULONG ulTdpDerateDPM6; - ULONG ulTdpDerateDPM7; -}ATOM_ASIC_PROFILING_INFO_V3_2; - - -// for Tonga/Fiji speed EVV algorithm -typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3 -{ - ATOM_COMMON_TABLE_HEADER asHeader; - ULONG ulEvvLkgFactor; - ULONG ulBoardCoreTemp; - ULONG ulMaxVddc; - ULONG ulMinVddc; - ULONG ulLoadLineSlop; - ULONG ulLeakageTemp; - ULONG ulLeakageVoltage; - EFUSE_LINEAR_FUNC_PARAM sCACm; - EFUSE_LINEAR_FUNC_PARAM sCACb; - EFUSE_LOGISTIC_FUNC_PARAM sKt_b; - EFUSE_LOGISTIC_FUNC_PARAM sKv_m; - EFUSE_LOGISTIC_FUNC_PARAM sKv_b; - USHORT usLkgEuseIndex; - UCHAR ucLkgEfuseBitLSB; - UCHAR ucLkgEfuseLength; - ULONG ulLkgEncodeLn_MaxDivMin; - ULONG ulLkgEncodeMax; - ULONG ulLkgEncodeMin; - ULONG ulEfuseLogisticAlpha; - USHORT usPowerDpm0; - USHORT usPowerDpm1; - USHORT usPowerDpm2; - USHORT usPowerDpm3; - USHORT usPowerDpm4; - USHORT usPowerDpm5; - USHORT usPowerDpm6; - USHORT usPowerDpm7; - ULONG ulTdpDerateDPM0; - ULONG ulTdpDerateDPM1; - ULONG ulTdpDerateDPM2; - ULONG ulTdpDerateDPM3; - ULONG ulTdpDerateDPM4; - ULONG ulTdpDerateDPM5; - ULONG ulTdpDerateDPM6; - ULONG ulTdpDerateDPM7; - EFUSE_LINEAR_FUNC_PARAM sRoFuse; - ULONG ulRoAlpha; - ULONG ulRoBeta; - ULONG ulRoGamma; - ULONG ulRoEpsilon; - ULONG ulATermRo; - ULONG ulBTermRo; - ULONG ulCTermRo; - ULONG ulSclkMargin; - ULONG ulFmaxPercent; - ULONG ulCRPercent; - ULONG ulSFmaxPercent; - ULONG ulSCRPercent; - ULONG ulSDCMargine; -}ATOM_ASIC_PROFILING_INFO_V3_3; - -typedef struct _ATOM_POWER_SOURCE_OBJECT -{ - UCHAR ucPwrSrcId; // Power source - UCHAR ucPwrSensorType; // GPIO, I2C or none - UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id - UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect - UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect - UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect - UCHAR ucPwrSensActiveState; // high active or low active - UCHAR ucReserve[3]; // reserve - USHORT usSensPwr; // in unit of watt 
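Stepping back to the efuse conversion comments above (EFUSE_LINEAR_FUNC_PARAM and EFUSE_LOGISTIC_FUNC_PARAM), each compresses its formula into one line; spelled out as C they read as follows. This is a sketch only: the function names and the use of double-precision math are illustrative, the bit width is assumed to be below 32, and "range", "alpha", and "average" stand for ulEfuseEncodeRange, ulEfuseLogisticAlpha, and ulEfuseEncodeAverage after whatever fixed-point scaling the caller applies.

#include <math.h>

/* Linear: Measured = round(efuse * (Max-Min) / (2^bits - 1) + Min) */
static double efuse_linear_decode(unsigned int efuse, unsigned int bits,
                                  double range, double min)
{
    return round(efuse * range / ((1u << bits) - 1) + min);
}

/* Logistic: Measured = ln((2^bits - 1)/efuse - 1) * Range / (-alpha)
 *                      + (Max + Min)/2 */
static double efuse_logistic_decode(unsigned int efuse, unsigned int bits,
                                    double range, double alpha, double average)
{
    return log((double)((1u << bits) - 1) / efuse - 1.0)
           * range / (-alpha) + average;
}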
-}ATOM_POWER_SOURCE_OBJECT; - -typedef struct _ATOM_POWER_SOURCE_INFO -{ - ATOM_COMMON_TABLE_HEADER asHeader; - UCHAR asPwrbehave[16]; - ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; -}ATOM_POWER_SOURCE_INFO; - - -//Define ucPwrSrcId -#define POWERSOURCE_PCIE_ID1 0x00 -#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01 -#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02 -#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04 -#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08 - -//define ucPwrSensorId -#define POWER_SENSOR_ALWAYS 0x00 -#define POWER_SENSOR_GPIO 0x01 -#define POWER_SENSOR_I2C 0x02 - -typedef struct _ATOM_CLK_VOLT_CAPABILITY -{ - ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table - ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz -}ATOM_CLK_VOLT_CAPABILITY; - - -typedef struct _ATOM_CLK_VOLT_CAPABILITY_V2 -{ - USHORT usVoltageLevel; // The real Voltage Level round up value in unit of mv, - ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz -}ATOM_CLK_VOLT_CAPABILITY_V2; - -typedef struct _ATOM_AVAILABLE_SCLK_LIST -{ - ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz - USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK - USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK -}ATOM_AVAILABLE_SCLK_LIST; - -// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition -#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0] - -// this IntegrateSystemInfoTable is used for Liano/Ontario APU -typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulBootUpEngineClock; - ULONG ulDentistVCOFreq; - ULONG ulBootUpUMAClock; - ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; - ULONG ulBootUpReqDisplayVector; - ULONG ulOtherDisplayMisc; - ULONG ulGPUCapInfo; - ULONG ulSB_MMIO_Base_Addr; - USHORT usRequestedPWMFreqInHz; - UCHAR ucHtcTmpLmt; - UCHAR ucHtcHystLmt; - ULONG ulMinEngineClock; - ULONG ulSystemConfig; - ULONG ulCPUCapInfo; - USHORT usNBP0Voltage; - USHORT usNBP1Voltage; - USHORT usBootUpNBVoltage; - USHORT usExtDispConnInfoOffset; - USHORT usPanelRefreshRateRange; - UCHAR ucMemoryType; - UCHAR ucUMAChannelNumber; - ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; - ULONG ulCSR_M3_ARB_CNTL_UVD[10]; - ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; - ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; - ULONG ulGMCRestoreResetTime; - ULONG ulMinimumNClk; - ULONG ulIdleNClk; - ULONG ulDDR_DLL_PowerUpTime; - ULONG ulDDR_PLL_PowerUpTime; - USHORT usPCIEClkSSPercentage; - USHORT usPCIEClkSSType; - USHORT usLvdsSSPercentage; - USHORT usLvdsSSpreadRateIn10Hz; - USHORT usHDMISSPercentage; - USHORT usHDMISSpreadRateIn10Hz; - USHORT usDVISSPercentage; - USHORT usDVISSpreadRateIn10Hz; - ULONG SclkDpmBoostMargin; - ULONG SclkDpmThrottleMargin; - USHORT SclkDpmTdpLimitPG; - USHORT SclkDpmTdpLimitBoost; - ULONG ulBoostEngineCLock; - UCHAR ulBoostVid_2bit; - UCHAR EnableBoost; - USHORT GnbTdpLimit; - USHORT usMaxLVDSPclkFreqInSingleLink; - UCHAR ucLvdsMisc; - UCHAR ucLVDSReserved; - ULONG ulReserved3[15]; - ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; -}ATOM_INTEGRATED_SYSTEM_INFO_V6; - -// ulGPUCapInfo -#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 -#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08 - -//ucLVDSMisc: -#define SYS_INFO_LVDSMISC__888_FPDI_MODE 
0x01 -#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02 -#define SYS_INFO_LVDSMISC__888_BPC 0x04 -#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08 -#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10 -// new since Trinity -#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN 0x20 - -// not used any more -#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04 -#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08 - -/********************************************************************************************************************** - ATOM_INTEGRATED_SYSTEM_INFO_V6 Description -ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock -ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. -ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. -sDISPCLK_Voltage: Report Display clock voltage requirement. - -ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Liano/Ontaio projects: - ATOM_DEVICE_CRT1_SUPPORT 0x0001 - ATOM_DEVICE_CRT2_SUPPORT 0x0010 - ATOM_DEVICE_DFP1_SUPPORT 0x0008 - ATOM_DEVICE_DFP6_SUPPORT 0x0040 - ATOM_DEVICE_DFP2_SUPPORT 0x0080 - ATOM_DEVICE_DFP3_SUPPORT 0x0200 - ATOM_DEVICE_DFP4_SUPPORT 0x0400 - ATOM_DEVICE_DFP5_SUPPORT 0x0800 - ATOM_DEVICE_LCD1_SUPPORT 0x0002 -ulOtherDisplayMisc: Other display related flags, not defined yet. -ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode. - =1: TMDS/HDMI Coherent Mode use signel PLL mode. - bit[3]=0: Enable HW AUX mode detection logic - =1: Disable HW AUX mode dettion logic -ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage. - -usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). - Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0; - - When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below: - 1. SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use; - VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result, - Changing BL using VBIOS function is functional in both driver and non-driver present environment; - and enabling VariBri under the driver environment from PP table is optional. - - 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating - that BL control from GPU is expected. - VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1 - Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but - it's per platform - and enabling VariBri under the driver environment from PP table is optional. - -ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. - Threshold on value to enter HTC_active state. -ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt. - To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt. -ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings. -ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled - =1: PCIE Power Gating Enabled - Bit[1]=0: DDR-DLL shut-down feature disabled. - 1: DDR-DLL shut-down feature enabled. - Bit[2]=0: DDR-PLL Power down feature disabled. - 1: DDR-PLL Power down feature enabled. 
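The three ulSystemConfig bits just listed decode as simple masks. Only bit 0 has an official name in this header (ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE, defined earlier); the other two names below are hypothetical, introduced only for illustration:

#define IGP_SYSCFG_PCIE_POWER_GATING_EN (1u << 0) /* = ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE */
#define IGP_SYSCFG_DDR_DLL_SHUTDOWN_EN  (1u << 1) /* hypothetical name */
#define IGP_SYSCFG_DDR_PLL_POWERDOWN_EN (1u << 2) /* hypothetical name */

/* Example test:
 *   pcie_pg = !!(info->ulSystemConfig & IGP_SYSCFG_PCIE_POWER_GATING_EN);
 */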
-ulCPUCapInfo: TBD
-usNBP0Voltage: VID for the voltage on the NB P0 state
-usNBP1Voltage: VID for the voltage on the NB P1 state
-usBootUpNBVoltage: Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
-usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
-usPanelRefreshRateRange: Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
- to indicate a range.
- SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
- SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
- SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
- SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-ucMemoryType: [3:0]=1:DDR1; =2:DDR2; =3:DDR3. [7:4] is reserved.
-ucUMAChannelNumber: Number of system memory channels.
-ulCSR_M3_ARB_CNTL_DEFAULT[10]: Array of values for the CSR M3 arbiter for the default case.
-ulCSR_M3_ARB_CNTL_UVD[10]: Array of values for the CSR M3 arbiter for UVD playback.
-ulCSR_M3_ARB_CNTL_FS3D[10]: Array of values for the CSR M3 arbiter for full-screen 3D applications.
-sAvail_SCLK[5]: Arrays providing the available list of SCLKs and corresponding voltages, ordered from low to high.
-ulGMCRestoreResetTime: GMC power restore and GMC reset time, used to calculate data reconnection latency. Unit in ns.
-ulMinimumNClk: Minimum NCLK speed among all NB P-states, used to calculate data reconnection latency. Unit in 10kHz.
-ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
-ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
-ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
-usPCIEClkSSPercentage: PCIE clock Spread Spectrum percentage in units of 0.01%; 100 means 1%.
-usPCIEClkSSType: PCIE clock Spread Spectrum type. 0 for Down spread (default); 1 for Center spread.
-usLvdsSSPercentage: LVDS panel (not including eDP) Spread Spectrum percentage in units of 0.01%; =0 means use the VBIOS default setting.
-usLvdsSSpreadRateIn10Hz: LVDS panel (not including eDP) Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting.
-usHDMISSPercentage: HDMI Spread Spectrum percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting.
-usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting.
-usDVISSPercentage: DVI Spread Spectrum percentage in units of 0.01%; 100 means 1%; =0 means use the VBIOS default setting.
-usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in units of 10Hz; =0 means use the VBIOS default setting.
-usMaxLVDSPclkFreqInSingleLink: Max pixel clock of an LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85 MHz.
-ucLVDSMisc: [bit0] LVDS 888bit panel mode: =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
- [bit1] LVDS panel lower and upper link mapping: =0: lower link and upper link not swapped, =1: lower link and upper link swapped
- [bit2] LVDS 888bit per color mode: =0: 666 bits per color, =1: 888 bits per color
- [bit3] LVDS parameter override enable: =0: ucLvdsMisc parameters are not used, =1: ucLvdsMisc parameters should be used
- [bit4] Polarity of the signal sent to the digital BLON output pin.
=0: not inverted(active high) =1: inverted ( active low ) -**********************************************************************************************************************/ - -// this Table is used for Liano/Ontario APU -typedef struct _ATOM_FUSION_SYSTEM_INFO_V1 -{ - ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo; - ULONG ulPowerplayTable[128]; -}ATOM_FUSION_SYSTEM_INFO_V1; - - -typedef struct _ATOM_TDP_CONFIG_BITS -{ -#if ATOM_BIG_ENDIAN - ULONG uReserved:2; - ULONG uTDP_Value:14; // Original TDP value in tens of milli watts - ULONG uCTDP_Value:14; // Override value in tens of milli watts - ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value)) -#else - ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value)) - ULONG uCTDP_Value:14; // Override value in tens of milli watts - ULONG uTDP_Value:14; // Original TDP value in tens of milli watts - ULONG uReserved:2; -#endif -}ATOM_TDP_CONFIG_BITS; - -typedef union _ATOM_TDP_CONFIG -{ - ATOM_TDP_CONFIG_BITS TDP_config; - ULONG TDP_config_all; -}ATOM_TDP_CONFIG; - -/********************************************************************************************************************** - ATOM_FUSION_SYSTEM_INFO_V1 Description -sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition. -ulPowerplayTable[128]: This 512 bytes memory is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting form ulPowerplayTable[0] -**********************************************************************************************************************/ - -// this IntegrateSystemInfoTable is used for Trinity APU -typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulBootUpEngineClock; - ULONG ulDentistVCOFreq; - ULONG ulBootUpUMAClock; - ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; - ULONG ulBootUpReqDisplayVector; - ULONG ulOtherDisplayMisc; - ULONG ulGPUCapInfo; - ULONG ulSB_MMIO_Base_Addr; - USHORT usRequestedPWMFreqInHz; - UCHAR ucHtcTmpLmt; - UCHAR ucHtcHystLmt; - ULONG ulMinEngineClock; - ULONG ulSystemConfig; - ULONG ulCPUCapInfo; - USHORT usNBP0Voltage; - USHORT usNBP1Voltage; - USHORT usBootUpNBVoltage; - USHORT usExtDispConnInfoOffset; - USHORT usPanelRefreshRateRange; - UCHAR ucMemoryType; - UCHAR ucUMAChannelNumber; - UCHAR strVBIOSMsg[40]; - ATOM_TDP_CONFIG asTdpConfig; - ULONG ulReserved[19]; - ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; - ULONG ulGMCRestoreResetTime; - ULONG ulMinimumNClk; - ULONG ulIdleNClk; - ULONG ulDDR_DLL_PowerUpTime; - ULONG ulDDR_PLL_PowerUpTime; - USHORT usPCIEClkSSPercentage; - USHORT usPCIEClkSSType; - USHORT usLvdsSSPercentage; - USHORT usLvdsSSpreadRateIn10Hz; - USHORT usHDMISSPercentage; - USHORT usHDMISSpreadRateIn10Hz; - USHORT usDVISSPercentage; - USHORT usDVISSpreadRateIn10Hz; - ULONG SclkDpmBoostMargin; - ULONG SclkDpmThrottleMargin; - USHORT SclkDpmTdpLimitPG; - USHORT SclkDpmTdpLimitBoost; - ULONG ulBoostEngineCLock; - UCHAR ulBoostVid_2bit; - UCHAR EnableBoost; - USHORT GnbTdpLimit; - USHORT usMaxLVDSPclkFreqInSingleLink; - UCHAR ucLvdsMisc; - UCHAR ucTravisLVDSVolAdjust; - UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms; - UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms; - UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms; - UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms; - UCHAR ucLVDSOffToOnDelay_in4Ms; - UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms; - UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms; - UCHAR ucMinAllowedBL_Level; - ULONG ulLCDBitDepthControlVal; - ULONG ulNbpStateMemclkFreq[4]; - USHORT usNBP2Voltage; - USHORT usNBP3Voltage; - ULONG 
-// this IntegrateSystemInfoTable is used for Trinity APU
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;
-  ULONG ulDentistVCOFreq;
-  ULONG ulBootUpUMAClock;
-  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulOtherDisplayMisc;
-  ULONG ulGPUCapInfo;
-  ULONG ulSB_MMIO_Base_Addr;
-  USHORT usRequestedPWMFreqInHz;
-  UCHAR ucHtcTmpLmt;
-  UCHAR ucHtcHystLmt;
-  ULONG ulMinEngineClock;
-  ULONG ulSystemConfig;
-  ULONG ulCPUCapInfo;
-  USHORT usNBP0Voltage;
-  USHORT usNBP1Voltage;
-  USHORT usBootUpNBVoltage;
-  USHORT usExtDispConnInfoOffset;
-  USHORT usPanelRefreshRateRange;
-  UCHAR ucMemoryType;
-  UCHAR ucUMAChannelNumber;
-  UCHAR strVBIOSMsg[40];
-  ATOM_TDP_CONFIG asTdpConfig;
-  ULONG ulReserved[19];
-  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
-  ULONG ulGMCRestoreResetTime;
-  ULONG ulMinimumNClk;
-  ULONG ulIdleNClk;
-  ULONG ulDDR_DLL_PowerUpTime;
-  ULONG ulDDR_PLL_PowerUpTime;
-  USHORT usPCIEClkSSPercentage;
-  USHORT usPCIEClkSSType;
-  USHORT usLvdsSSPercentage;
-  USHORT usLvdsSSpreadRateIn10Hz;
-  USHORT usHDMISSPercentage;
-  USHORT usHDMISSpreadRateIn10Hz;
-  USHORT usDVISSPercentage;
-  USHORT usDVISSpreadRateIn10Hz;
-  ULONG SclkDpmBoostMargin;
-  ULONG SclkDpmThrottleMargin;
-  USHORT SclkDpmTdpLimitPG;
-  USHORT SclkDpmTdpLimitBoost;
-  ULONG ulBoostEngineCLock;
-  UCHAR ulBoostVid_2bit;
-  UCHAR EnableBoost;
-  USHORT GnbTdpLimit;
-  USHORT usMaxLVDSPclkFreqInSingleLink;
-  UCHAR ucLvdsMisc;
-  UCHAR ucTravisLVDSVolAdjust;
-  UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
-  UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
-  UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
-  UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
-  UCHAR ucLVDSOffToOnDelay_in4Ms;
-  UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
-  UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
-  UCHAR ucMinAllowedBL_Level;
-  ULONG ulLCDBitDepthControlVal;
-  ULONG ulNbpStateMemclkFreq[4];
-  USHORT usNBP2Voltage;
-  USHORT usNBP3Voltage;
-  ULONG ulNbpStateNClkFreq[4];
-  UCHAR ucNBDPMEnable;
-  UCHAR ucReserved[3];
-  UCHAR ucDPMState0VclkFid;
-  UCHAR ucDPMState0DclkFid;
-  UCHAR ucDPMState1VclkFid;
-  UCHAR ucDPMState1DclkFid;
-  UCHAR ucDPMState2VclkFid;
-  UCHAR ucDPMState2DclkFid;
-  UCHAR ucDPMState3VclkFid;
-  UCHAR ucDPMState3DclkFid;
-  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
-}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
-
-// ulOtherDisplayMisc
-#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
-#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
-#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
-#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
-
-// ulGPUCapInfo
-#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
-#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
-#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
-//ulGPUCapInfo[16]=1 indicates the SMC firmware is able to support the GNB fast resume function, so that the driver can call SMC to program most GNB registers during resume, from ML
-#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000
-
-//ulGPUCapInfo[17]=1 indicates the battery boost feature is enabled, from ML
-#define SYS_INFO_GPUCAPS__BATTERY_BOOST_ENABLE 0x00020000
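These capability bits are plain flags in ulGPUCapInfo, so a driver-side query reduces to a mask test. A minimal sketch using the defines above:

    #include <stdbool.h>

    /* True when the SMC firmware advertises GNB fast-resume support. */
    static bool gnb_fast_resume_capable(const ATOM_INTEGRATED_SYSTEM_INFO_V1_7 *info)
    {
        return (info->ulGPUCapInfo & SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE) != 0;
    }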
-/**********************************************************************************************************************
-  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
-ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses the pre-defined bootup engine clock
-ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-sDISPCLK_Voltage: Report Display clock voltage requirement.
-
-ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following are supported devices in Trinity projects:
-   ATOM_DEVICE_CRT1_SUPPORT 0x0001
-   ATOM_DEVICE_DFP1_SUPPORT 0x0008
-   ATOM_DEVICE_DFP6_SUPPORT 0x0040
-   ATOM_DEVICE_DFP2_SUPPORT 0x0080
-   ATOM_DEVICE_DFP3_SUPPORT 0x0200
-   ATOM_DEVICE_DFP4_SUPPORT 0x0400
-   ATOM_DEVICE_DFP5_SUPPORT 0x0800
-   ATOM_DEVICE_LCD1_SUPPORT 0x0002
-ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
-   =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
-   bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
-   =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
-   bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
-   =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
-   bit[3]=0: VBIOS fast boot is disabled
-   =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if the LCD panel is connected and the LID is open)
-ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
-   =1: TMDS/HDMI Coherent Mode uses single PLL mode.
-   bit[1]=0: DP mode uses cascade PLL mode ( New for Trinity )
-   =1: DP mode uses single PLL mode
-   bit[3]=0: Enable AUX HW mode detection logic
-   =1: Disable AUX HW mode detection logic
-
-ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
-
-usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
-   Any attempt to change BL using the VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
-
-   When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
-   1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
-      VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
-      changing BL using the VBIOS function is functional in both driver and non-driver present environments,
-      and enabling VariBri under the driver environment from the PP table is optional.
-
-   2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
-      that BL control from GPU is expected.
-      VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
-      Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
-      it's per platform,
-      and enabling VariBri under the driver environment from the PP table is optional.
-
-ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
-   Threshold on value to enter HTC_active state.
-ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
-   To calculate the threshold off value to exit HTC_active state, which is the threshold on value minus ucHtcHystLmt.
-ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
-ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
-   =1: PCIE Power Gating Enabled
-   Bit[1]=0: DDR-DLL shut-down feature disabled.
-   1: DDR-DLL shut-down feature enabled.
-   Bit[2]=0: DDR-PLL Power down feature disabled.
-   1: DDR-PLL Power down feature enabled.
-ulCPUCapInfo: TBD
-usNBP0Voltage: VID for voltage on NB P0 State
-usNBP1Voltage: VID for voltage on NB P1 State
-usNBP2Voltage: VID for voltage on NB P2 State
-usNBP3Voltage: VID for voltage on NB P3 State
-usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
-usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
-usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
-   to indicate a range.
-   SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
-   SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
-   SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
-   SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-ucMemoryType: [3:0]=1:DDR1; =2:DDR2; =3:DDR3. [7:4] is reserved.
-ucUMAChannelNumber: System memory channel numbers.
-ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
-ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
-ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
-sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
-ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
-ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
-ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
-ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
-ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
-usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
-usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
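All spread-spectrum fields in these tables share one convention: percentages are stored in 0.01% units and modulation rates in 10Hz units. A small conversion sketch (example values only):

    /* Spread-spectrum fields: percentage in 0.01% units, rate in 10 Hz units. */
    static double ss_percent(unsigned short raw)   { return raw * 0.01;  }
    static double ss_rate_hz(unsigned short raw10) { return raw10 * 10.0; }
    /* e.g. usHDMISSPercentage = 50 -> 0.50 %; usHDMISSpreadRateIn10Hz = 3000 -> 30 kHz */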
-usLvdsSSPercentage: LVDS panel ( not including eDP ) Spread Spectrum Percentage in unit of 0.01%; =0, use VBIOS default setting.
-usLvdsSSpreadRateIn10Hz: LVDS panel ( not including eDP ) Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting.
-usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting.
-usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-usMaxLVDSPclkFreqInSingleLink: Max pixel clock of a single-link LVDS panel; if =0, VBIOS uses the default threshold, which is currently 85MHz.
-ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
-   [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
-   [bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
-   [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
-   [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
-   [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
-ucTravisLVDSVolAdjust: When ucLVDSMisc[5]=1, it means the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
-   value to program the Travis register LVDS_CTRL_4
-ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
-   =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
-   =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL ) off to data enable ( DE ) signal off.
-   =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSPwrOffSeqDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
-   =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
-   =0 means to use the VBIOS default delay, which is 125 ( 500ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
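All of the *_in4Ms delays above are stored in 4ms units, with 0 meaning "use the VBIOS default". A minimal conversion sketch (the default of 8, i.e. 32ms, matches the ucLVDSPwrOnSeqDIGONtoDE_in4Ms entry above):

    /* Convert a *_in4Ms field to milliseconds, applying the documented default. */
    static unsigned int lvds_delay_ms(unsigned char in4ms, unsigned char default_in4ms)
    {
        return (in4ms ? in4ms : default_in4ms) * 4u;
    }

    /* lvds_delay_ms(info->ucLVDSPwrOnSeqDIGONtoDE_in4Ms, 8) -> 32 when the field is 0 */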
-ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
-   LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
-   =0 means to use the VBIOS default delay, which is 0 ( 0ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
-   LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
-   =0 means to use the VBIOS default delay, which is 0 ( 0ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
-
-ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10Khz in different NB pstates.
-
-**********************************************************************************************************************/
-
-// this IntegrateSystemInfoTable is used for Kaveri & Kabini APU
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;
-  ULONG ulDentistVCOFreq;
-  ULONG ulBootUpUMAClock;
-  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulVBIOSMisc;
-  ULONG ulGPUCapInfo;
-  ULONG ulDISP_CLK2Freq;
-  USHORT usRequestedPWMFreqInHz;
-  UCHAR ucHtcTmpLmt;
-  UCHAR ucHtcHystLmt;
-  ULONG ulReserved2;
-  ULONG ulSystemConfig;
-  ULONG ulCPUCapInfo;
-  ULONG ulReserved3;
-  USHORT usGPUReservedSysMemSize;
-  USHORT usExtDispConnInfoOffset;
-  USHORT usPanelRefreshRateRange;
-  UCHAR ucMemoryType;
-  UCHAR ucUMAChannelNumber;
-  UCHAR strVBIOSMsg[40];
-  ATOM_TDP_CONFIG asTdpConfig;
-  ULONG ulReserved[19];
-  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
-  ULONG ulGMCRestoreResetTime;
-  ULONG ulReserved4;
-  ULONG ulIdleNClk;
-  ULONG ulDDR_DLL_PowerUpTime;
-  ULONG ulDDR_PLL_PowerUpTime;
-  USHORT usPCIEClkSSPercentage;
-  USHORT usPCIEClkSSType;
-  USHORT usLvdsSSPercentage;
-  USHORT usLvdsSSpreadRateIn10Hz;
-  USHORT usHDMISSPercentage;
-  USHORT usHDMISSpreadRateIn10Hz;
-  USHORT usDVISSPercentage;
-  USHORT usDVISSpreadRateIn10Hz;
-  ULONG ulGPUReservedSysMemBaseAddrLo;
-  ULONG ulGPUReservedSysMemBaseAddrHi;
-  ATOM_CLK_VOLT_CAPABILITY s5thDISPCLK_Voltage;
-  ULONG ulReserved5;
-  USHORT usMaxLVDSPclkFreqInSingleLink;
-  UCHAR ucLvdsMisc;
-  UCHAR ucTravisLVDSVolAdjust;
-  UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
-  UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
-  UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
-  UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
-  UCHAR ucLVDSOffToOnDelay_in4Ms;
-  UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
-  UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
-  UCHAR ucMinAllowedBL_Level;
-  ULONG ulLCDBitDepthControlVal;
-  ULONG ulNbpStateMemclkFreq[4];
-  ULONG ulPSPVersion;
-  ULONG ulNbpStateNClkFreq[4];
-  USHORT usNBPStateVoltage[4];
-  USHORT usBootUpNBVoltage;
-  USHORT usReserved2;
-  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
-}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
-
-/**********************************************************************************************************************
-  ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
-ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses the pre-defined bootup engine clock
-ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-sDISPCLK_Voltage: Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
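Clock fields in these tables are in 10kHz units, with 0 generally meaning "use the VBIOS default". A trivial conversion sketch:

    /* Convert a 10 kHz-unit clock field to MHz. */
    static unsigned long clk_10khz_to_mhz(unsigned long raw)
    {
        return raw / 100;   /* e.g. ulBootUpEngineClock = 60000 -> 600 MHz */
    }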
-
-ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following are supported devices in Trinity projects:
-   ATOM_DEVICE_CRT1_SUPPORT 0x0001
-   ATOM_DEVICE_DFP1_SUPPORT 0x0008
-   ATOM_DEVICE_DFP6_SUPPORT 0x0040
-   ATOM_DEVICE_DFP2_SUPPORT 0x0080
-   ATOM_DEVICE_DFP3_SUPPORT 0x0200
-   ATOM_DEVICE_DFP4_SUPPORT 0x0400
-   ATOM_DEVICE_DFP5_SUPPORT 0x0800
-   ATOM_DEVICE_LCD1_SUPPORT 0x0002
-
-ulVBIOSMisc: Miscellaneous flags for VBIOS requirements and interface
-   bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
-   =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
-   bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
-   =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
-   bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
-   =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
-   bit[3]=0: VBIOS fast boot is disabled
-   =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if the LCD panel is connected and the LID is open)
-
-ulGPUCapInfo: bit[0~2]= Reserved
-   bit[3]=0: Enable AUX HW mode detection logic
-   =1: Disable AUX HW mode detection logic
-   bit[4]=0: Disable DFS bypass feature
-   =1: Enable DFS bypass feature
-
-usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
-   Any attempt to change BL using the VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
-
-   When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
-   1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
-      VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
-      changing BL using the VBIOS function is functional in both driver and non-driver present environments,
-      and enabling VariBri under the driver environment from the PP table is optional.
-
-   2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
-      that BL control from GPU is expected.
-      VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
-      Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
-      it's per platform,
-      and enabling VariBri under the driver environment from the PP table is optional.
-
-ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
-ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
-   To calculate the threshold off value to exit HTC_active state, which is the threshold on value minus ucHtcHystLmt.
-
-ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
-   =1: PCIE Power Gating Enabled
-   Bit[1]=0: DDR-DLL shut-down feature disabled.
-   1: DDR-DLL shut-down feature enabled.
-   Bit[2]=0: DDR-PLL Power down feature disabled.
-   1: DDR-PLL Power down feature enabled.
-   Bit[3]=0: GNB DPM is disabled
-   =1: GNB DPM is enabled
-ulCPUCapInfo: TBD
-
-usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
-usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
-   to indicate a range.
-   SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
-   SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
-   SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
-   SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
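ulBootUpReqDisplayVector is a plain OR of the ATOM_DEVICE_*_SUPPORT bits listed above (defined earlier in this header), so testing for a particular boot display is a mask check. A minimal sketch:

    /* Nonzero when the VBIOS lit up the internal panel at boot. */
    static int booted_on_lcd(const ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info)
    {
        return (info->ulBootUpReqDisplayVector & ATOM_DEVICE_LCD1_SUPPORT) != 0;
    }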
-
-ucMemoryType: [3:0]=1:DDR1; =2:DDR2; =3:DDR3; =5:GDDR5; [7:4] is reserved.
-ucUMAChannelNumber: System memory channel numbers.
-
-strVBIOSMsg[40]: VBIOS boot up customized message string
-
-sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
-
-ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
-ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
-ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
-ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
-
-usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
-usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
-usLvdsSSPercentage: LVDS panel ( not including eDP ) Spread Spectrum Percentage in unit of 0.01%; =0, use VBIOS default setting.
-usLvdsSSpreadRateIn10Hz: LVDS panel ( not including eDP ) Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting.
-usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting.
-usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
-
-usGPUReservedSysMemSize: Reserved system memory size for the ACP engine in the APU GNB, in MB. 0/2/4MB based on CMOS options; the current default could be 0MB. KV only, not on KB.
-ulGPUReservedSysMemBaseAddrLo: Low 32 bits of the base address of the reserved system memory.
-ulGPUReservedSysMemBaseAddrHi: High 32 bits of the base address of the reserved system memory.
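The reserved-memory base is split across two 32-bit fields, so reassembling it is a shift-and-OR. A minimal sketch:

    #include <stdint.h>

    /* 64-bit physical base of the GNB-reserved system memory region. */
    static uint64_t gpu_reserved_base(const ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info)
    {
        return ((uint64_t)info->ulGPUReservedSysMemBaseAddrHi << 32) |
               info->ulGPUReservedSysMemBaseAddrLo;
    }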
-
-usMaxLVDSPclkFreqInSingleLink: Max pixel clock of a single-link LVDS panel; if =0, VBIOS uses the default threshold, which is currently 85MHz.
-ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
-   [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
-   [bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
-   [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
-   [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
-   [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
-ucTravisLVDSVolAdjust: When ucLVDSMisc[5]=1, it means the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
-   value to program the Travis register LVDS_CTRL_4
-ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
-   LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
-   =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms:
-   LVDS power up sequence time in unit of 4ms, time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
-   =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms:
-   LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL ) off to data enable ( DE ) signal off.
-   =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOffSeqDEtoDIGON_in4Ms:
-   LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
-   =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSOffToOnDelay_in4Ms:
-   LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
-   =0 means to use the VBIOS default delay, which is 125 ( 500ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
-   LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
-   =0 means to use the VBIOS default delay, which is 0 ( 0ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
-   LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
-   =0 means to use the VBIOS default delay, which is 0 ( 0ms ).
-   This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
-
-ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
-
-ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10Khz in different NB P-States(P0, P1, P2 & P3).
-ulNbpStateNClkFreq[4]: NB P-State NClk frequency in different NB P-States
-usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage
-usBootUpNBVoltage: NB P-State voltage during boot up, before the driver is loaded
-sExtDispConnInfo: Display connector information table provided to VBIOS
-
-**********************************************************************************************************************/
-
-// this Table is used for Kaveri/Kabini APU
-typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
-{
-  ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
-  ULONG ulPowerplayTable[128];                         // Update comments here to link new powerplay table definition structure
-}ATOM_FUSION_SYSTEM_INFO_V2;
-
-
-typedef struct _ATOM_I2C_REG_INFO
-{
-  UCHAR ucI2cRegIndex;
-  UCHAR ucI2cRegVal;
-}ATOM_I2C_REG_INFO;
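ATOM_I2C_REG_INFO pairs a register index with the value to write, and the V1_9 table below carries an array of them (asExtHDMIRegSetting) for an external HDMI redriver. A minimal iteration sketch, assuming a caller-supplied i2c_write_reg(slave, index, value) helper (hypothetical, not part of this header):

    /* Program ucExtHDMIReDrvRegNum register/value pairs into the redriver
     * at slave address ucExtHDMIReDrvSlvAddr. i2c_write_reg() is assumed. */
    static void program_hdmi_redriver(const ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info)
    {
        int i;
        for (i = 0; i < info->ucExtHDMIReDrvRegNum && i < 9; i++)
            i2c_write_reg(info->ucExtHDMIReDrvSlvAddr,
                          info->asExtHDMIRegSetting[i].ucI2cRegIndex,
                          info->asExtHDMIRegSetting[i].ucI2cRegVal);
    }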
-
-// this IntegrateSystemInfoTable is used for Carrizo
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;
-  ULONG ulDentistVCOFreq;
-  ULONG ulBootUpUMAClock;
-  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; // no longer used, keep it as is to avoid driver compiling error
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulVBIOSMisc;
-  ULONG ulGPUCapInfo;
-  ULONG ulDISP_CLK2Freq;
-  USHORT usRequestedPWMFreqInHz;
-  UCHAR ucHtcTmpLmt;
-  UCHAR ucHtcHystLmt;
-  ULONG ulReserved2;
-  ULONG ulSystemConfig;
-  ULONG ulCPUCapInfo;
-  ULONG ulReserved3;
-  USHORT usGPUReservedSysMemSize;
-  USHORT usExtDispConnInfoOffset;
-  USHORT usPanelRefreshRateRange;
-  UCHAR ucMemoryType;
-  UCHAR ucUMAChannelNumber;
-  UCHAR strVBIOSMsg[40];
-  ATOM_TDP_CONFIG asTdpConfig;
-  UCHAR ucExtHDMIReDrvSlvAddr;
-  UCHAR ucExtHDMIReDrvRegNum;
-  ATOM_I2C_REG_INFO asExtHDMIRegSetting[9];
-  ULONG ulReserved[2];
-  ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8];
-  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; // no longer used, keep it as is to avoid driver compiling error
-  ULONG ulGMCRestoreResetTime;
-  ULONG ulReserved4;
-  ULONG ulIdleNClk;
-  ULONG ulDDR_DLL_PowerUpTime;
-  ULONG ulDDR_PLL_PowerUpTime;
-  USHORT usPCIEClkSSPercentage;
-  USHORT usPCIEClkSSType;
-  USHORT usLvdsSSPercentage;
-  USHORT usLvdsSSpreadRateIn10Hz;
-  USHORT usHDMISSPercentage;
-  USHORT usHDMISSpreadRateIn10Hz;
-  USHORT usDVISSPercentage;
-  USHORT usDVISSpreadRateIn10Hz;
-  ULONG ulGPUReservedSysMemBaseAddrLo;
-  ULONG ulGPUReservedSysMemBaseAddrHi;
-  ULONG ulReserved5[3];
-  USHORT usMaxLVDSPclkFreqInSingleLink;
-  UCHAR ucLvdsMisc;
-  UCHAR ucTravisLVDSVolAdjust;
-  UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
-  UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
-  UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
-  UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
-  UCHAR ucLVDSOffToOnDelay_in4Ms;
-  UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
-  UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
-  UCHAR ucMinAllowedBL_Level;
-  ULONG ulLCDBitDepthControlVal;
-  ULONG ulNbpStateMemclkFreq[4]; // only 2 levels are changed.
-  ULONG ulPSPVersion;
-  ULONG ulNbpStateNClkFreq[4];
-  USHORT usNBPStateVoltage[4];
-  USHORT usBootUpNBVoltage;
-  UCHAR ucEDPv1_4VSMode;
-  UCHAR ucReserved2;
-  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
-}ATOM_INTEGRATED_SYSTEM_INFO_V1_9;
-
-
-// definition for ucEDPv1_4VSMode
-#define EDP_VS_LEGACY_MODE 0
-#define EDP_VS_LOW_VDIFF_MODE 1
-#define EDP_VS_HIGH_VDIFF_MODE 2
-#define EDP_VS_STRETCH_MODE 3
-#define EDP_VS_SINGLE_VDIFF_MODE 4
-#define EDP_VS_VARIABLE_PREM_MODE 5
-
-
-// this IntegrateSystemInfoTable is used for Carrizo
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ULONG ulBootUpEngineClock;
-  ULONG ulDentistVCOFreq;
-  ULONG ulBootUpUMAClock;
-  ULONG ulReserved0[8];
-  ULONG ulBootUpReqDisplayVector;
-  ULONG ulVBIOSMisc;
-  ULONG ulGPUCapInfo;
-  ULONG ulReserved1;
-  USHORT usRequestedPWMFreqInHz;
-  UCHAR ucHtcTmpLmt;
-  UCHAR ucHtcHystLmt;
-  ULONG ulReserved2;
-  ULONG ulSystemConfig;
-  ULONG ulCPUCapInfo;
-  ULONG ulReserved3;
-  USHORT usGPUReservedSysMemSize;
-  USHORT usExtDispConnInfoOffset;
-  USHORT usPanelRefreshRateRange;
-  UCHAR ucMemoryType;
-  UCHAR ucUMAChannelNumber;
-  UCHAR strVBIOSMsg[40];
-  ATOM_TDP_CONFIG asTdpConfig;
-  ULONG ulReserved[7];
-  ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8];
-  ULONG ulReserved6[10];
-  ULONG ulGMCRestoreResetTime;
-  ULONG ulReserved4;
-  ULONG ulIdleNClk;
-  ULONG ulDDR_DLL_PowerUpTime;
-  ULONG ulDDR_PLL_PowerUpTime;
-  USHORT usPCIEClkSSPercentage;
-  USHORT usPCIEClkSSType;
-  USHORT usLvdsSSPercentage;
-  USHORT usLvdsSSpreadRateIn10Hz;
-  USHORT usHDMISSPercentage;
-  USHORT usHDMISSpreadRateIn10Hz;
-  USHORT usDVISSPercentage;
-  USHORT usDVISSpreadRateIn10Hz;
-  ULONG ulGPUReservedSysMemBaseAddrLo;
-  ULONG ulGPUReservedSysMemBaseAddrHi;
-  ULONG ulReserved5[3];
-  USHORT usMaxLVDSPclkFreqInSingleLink;
-  UCHAR ucLvdsMisc;
-  UCHAR ucTravisLVDSVolAdjust;
-  UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
-  UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
-  UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
-  UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
-  UCHAR ucLVDSOffToOnDelay_in4Ms;
-  UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
-  UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
-  UCHAR ucMinAllowedBL_Level;
-  ULONG ulLCDBitDepthControlVal;
-  ULONG ulNbpStateMemclkFreq[2];
-  ULONG ulReserved7[2];
-  ULONG ulPSPVersion;
-  ULONG ulNbpStateNClkFreq[4];
-  USHORT usNBPStateVoltage[4];
-  USHORT usBootUpNBVoltage;
-  UCHAR ucEDPv1_4VSMode;
-  UCHAR ucReserved2;
-  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
-}ATOM_INTEGRATED_SYSTEM_INFO_V1_10;
-
-/**************************************************************************/
-// This portion is only used when an ext thermal chip or an engine/memory clock SS chip is populated on a design
-//Memory SS Info Table
-//Define Memory Clock SS chip ID
-#define ICS91719 1
-#define ICS91720 2
-
-//Define one structure to inform SW of a "block of data" to write to an external SS chip via the I2C protocol
-typedef struct _ATOM_I2C_DATA_RECORD
-{
-  UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, in addition to "Start" and "Stop"
-  UCHAR ucI2CData[1];    //I2C data in bytes, should usually be less than 16 bytes
-}ATOM_I2C_DATA_RECORD;
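Several ATOM tables, like ATOM_I2C_DATA_RECORD above, declare a one-element trailing array as a stand-in for a variable-length payload, so record sizes must be computed rather than taken from sizeof alone. A minimal sketch:

    #include <stddef.h>

    /* Byte size of a record carrying n data bytes (n >= 1). The [1] trailing
     * array already accounts for the first byte. */
    static size_t i2c_record_size(size_t n)
    {
        return sizeof(ATOM_I2C_DATA_RECORD) + (n - 1) * sizeof(UCHAR);
    }

    /* Walking a packed list of such records means advancing by
     * i2c_record_size(rec->ucNunberOfBytes) at each step. */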
-
-//Define one structure to inform SW how many blocks of data to write to an external SS chip via the I2C protocol, in addition to other information
-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
-{
-  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
-  UCHAR ucSSChipID;                   //SS chip being used
-  UCHAR ucSSChipSlaveAddr;            //Slave Address to set up this SS chip
-  UCHAR ucNumOfI2CDataRecords;        //number of data blocks
-  ATOM_I2C_DATA_RECORD asI2CData[1];
-}ATOM_I2C_DEVICE_SETUP_INFO;
-
-//==========================================================================================
-typedef struct _ATOM_ASIC_MVDD_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
-}ATOM_ASIC_MVDD_INFO;
-
-//==========================================================================================
-#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
-
-//==========================================================================================
-/**************************************************************************/
-
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT
-{
-  ULONG ulTargetClockRange;          //Clock Out frequency (VCO ), in unit of 10Khz
-  USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
-  USHORT usSpreadRateInKhz;          //in unit of kHz, modulation freq
-  UCHAR ucClockIndication;           //Indicate which clock source needs SS
-  UCHAR ucSpreadSpectrumMode;        //Bit1=0 Down Spread,=1 Center Spread.
-  UCHAR ucReserved[2];
-}ATOM_ASIC_SS_ASSIGNMENT;
-
-//Define ucClockIndication. SW uses the IDs below to search whether SS is required/enabled on a clock branch/signal type.
-//SS is not required or enabled if a match is not found.
-#define ASIC_INTERNAL_MEMORY_SS 1
-#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
-#define ASIC_INTERNAL_SS_ON_TMDS 4
-#define ASIC_INTERNAL_SS_ON_HDMI 5
-#define ASIC_INTERNAL_SS_ON_LVDS 6
-#define ASIC_INTERNAL_SS_ON_DP 7
-#define ASIC_INTERNAL_SS_ON_DCPLL 8
-#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
-#define ASIC_INTERNAL_VCE_SS 10
-#define ASIC_INTERNAL_GPUPLL_SS 11
-
-
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
-{
-  ULONG ulTargetClockRange;          //For mem/engine/uvd, Clock Out frequency (VCO ), in unit of 10Khz
-                                     //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock ( 27000 or 16200 )
-  USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
-  USHORT usSpreadRateIn10Hz;         //in unit of 10Hz, modulation freq
-  UCHAR ucClockIndication;           //Indicate which clock source needs SS
-  UCHAR ucSpreadSpectrumMode;        //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
-  UCHAR ucReserved[2];
-}ATOM_ASIC_SS_ASSIGNMENT_V2;
-
-//ucSpreadSpectrumMode
-//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
-//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
-//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
-//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
-//#define ATOM_INTERNAL_SS_MASK 0x00000000
-//#define ATOM_EXTERNAL_SS_MASK 0x00000002
-
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-}ATOM_ASIC_INTERNAL_SS_INFO;
-
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is pointer only.
-}ATOM_ASIC_INTERNAL_SS_INFO_V2;
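As the ucClockIndication comment says, SW scans the assignment array for a matching clock ID; no match means no SS. A minimal lookup sketch over a V2 table, deriving the entry count from the table size since asSpreadSpectrum[1] is only a placeholder:

    #include <stddef.h>

    /* Find the SS assignment for a clock type (e.g. ASIC_INTERNAL_SS_ON_HDMI),
     * or NULL when spread spectrum is not enabled for it. */
    static ATOM_ASIC_SS_ASSIGNMENT_V2 *
    find_ss(ATOM_ASIC_INTERNAL_SS_INFO_V2 *tbl, UCHAR clock_id)
    {
        int n = (tbl->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
        int i;

        for (i = 0; i < n; i++)
            if (tbl->asSpreadSpectrum[i].ucClockIndication == clock_id)
                return &tbl->asSpreadSpectrum[i];
        return NULL;
    }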
-
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
-{
-  ULONG ulTargetClockRange;          //For mem/engine/uvd, Clock Out frequency (VCO ), in unit of 10Khz
-                                     //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock ( 27000 or 16200 )
-  USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
-  USHORT usSpreadRateIn10Hz;         //in unit of 10Hz, modulation freq
-  UCHAR ucClockIndication;           //Indicate which clock source needs SS
-  UCHAR ucSpreadSpectrumMode;        //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
-  UCHAR ucReserved[2];
-}ATOM_ASIC_SS_ASSIGNMENT_V3;
-
-//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
-#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
-#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
-#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10
-
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
-{
-  ATOM_COMMON_TABLE_HEADER sHeader;
-  ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
-}ATOM_ASIC_INTERNAL_SS_INFO_V3;
-
-
-//==============================Scratch Pad Definition Portion===============================
-#define ATOM_DEVICE_CONNECT_INFO_DEF 0
-#define ATOM_ROM_LOCATION_DEF 1
-#define ATOM_TV_STANDARD_DEF 2
-#define ATOM_ACTIVE_INFO_DEF 3
-#define ATOM_LCD_INFO_DEF 4
-#define ATOM_DOS_REQ_INFO_DEF 5
-#define ATOM_ACC_CHANGE_INFO_DEF 6
-#define ATOM_DOS_MODE_INFO_DEF 7
-#define ATOM_I2C_CHANNEL_STATUS_DEF 8
-#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
-#define ATOM_INTERNAL_TIMER_DEF 10
-
-// BIOS_0_SCRATCH Definition
-#define ATOM_S0_CRT1_MONO 0x00000001L
-#define ATOM_S0_CRT1_COLOR 0x00000002L
-#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
-
-#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L
-#define ATOM_S0_TV1_SVIDEO_A 0x00000008L
-#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
-
-#define ATOM_S0_CV_A 0x00000010L
-#define ATOM_S0_CV_DIN_A 0x00000020L
-#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
-
-
-#define ATOM_S0_CRT2_MONO 0x00000100L
-#define ATOM_S0_CRT2_COLOR 0x00000200L
-#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
-
-#define ATOM_S0_TV1_COMPOSITE 0x00000400L
-#define ATOM_S0_TV1_SVIDEO 0x00000800L
-#define ATOM_S0_TV1_SCART 0x00004000L
-#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
-
-#define ATOM_S0_CV 0x00001000L
-#define ATOM_S0_CV_DIN 0x00002000L
-#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN)
-
-#define ATOM_S0_DFP1 0x00010000L
-#define ATOM_S0_DFP2 0x00020000L
-#define ATOM_S0_LCD1 0x00040000L
-#define ATOM_S0_LCD2 0x00080000L
-#define ATOM_S0_DFP6 0x00100000L
-#define ATOM_S0_DFP3 0x00200000L
-#define ATOM_S0_DFP4 0x00400000L
-#define ATOM_S0_DFP5 0x00800000L
-
-
-#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6
-
-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
-                                             // the FAD/HDP reg access bug. Bit is read by DAL; this is obsolete from RV5xx on
-
-#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
-#define ATOM_S0_THERMAL_STATE_SHIFT 26
-
-#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
-#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
-
-#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
-#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
-#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
-#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
-
-//Byte aligned definition for BIOS usage
-#define ATOM_S0_CRT1_MONOb0 0x01
-#define ATOM_S0_CRT1_COLORb0 0x02
-#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
-
-#define ATOM_S0_TV1_COMPOSITEb0 0x04
-#define ATOM_S0_TV1_SVIDEOb0 0x08
-#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
-
-#define ATOM_S0_CVb0 0x10
-#define ATOM_S0_CV_DINb0 0x20
-#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
-
-#define ATOM_S0_CRT2_MONOb1 0x01
-#define ATOM_S0_CRT2_COLORb1 0x02
-#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
-
-#define ATOM_S0_TV1_COMPOSITEb1 0x04
-#define ATOM_S0_TV1_SVIDEOb1 0x08
-#define ATOM_S0_TV1_SCARTb1 0x40
-#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
-
-#define ATOM_S0_CVb1 0x10
-#define ATOM_S0_CV_DINb1 0x20
-#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
-
-#define ATOM_S0_DFP1b2 0x01
-#define ATOM_S0_DFP2b2 0x02
-#define ATOM_S0_LCD1b2 0x04
-#define ATOM_S0_LCD2b2 0x08
-#define ATOM_S0_DFP6b2 0x10
-#define ATOM_S0_DFP3b2 0x20
-#define ATOM_S0_DFP4b2 0x40
-#define ATOM_S0_DFP5b2 0x80
-
-
-#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
-#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
-
-#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
-#define ATOM_S0_LCD1_SHIFT 18
-
-// BIOS_1_SCRATCH Definition
-#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
-#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
-
-// BIOS_2_SCRATCH Definition
-#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
-#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
-#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
-
-#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
-#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
-#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
-
-#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
-#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
-
-#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
-#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1
-#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2
-#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3
-#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
-#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-
-
-//Byte aligned definition for BIOS usage
-#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
-#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
-
-#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
-#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
-#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
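The scratch-register fields follow a uniform MASK/SHIFT pattern, so extracting one is a masked shift. A minimal sketch reading the current backlight level out of a BIOS_2_SCRATCH value (reading the register itself is hardware-specific and not shown):

    /* Extract the 8-bit backlight PWM level from a BIOS_2_SCRATCH value. */
    static unsigned char current_bl_level(unsigned long scratch2)
    {
        return (scratch2 & ATOM_S2_CURRENT_BL_LEVEL_MASK) >> ATOM_S2_CURRENT_BL_LEVEL_SHIFT;
    }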
-
-// BIOS_3_SCRATCH Definition
-#define ATOM_S3_CRT1_ACTIVE 0x00000001L
-#define ATOM_S3_LCD1_ACTIVE 0x00000002L
-#define ATOM_S3_TV1_ACTIVE 0x00000004L
-#define ATOM_S3_DFP1_ACTIVE 0x00000008L
-#define ATOM_S3_CRT2_ACTIVE 0x00000010L
-#define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_DFP6_ACTIVE 0x00000040L
-#define ATOM_S3_DFP2_ACTIVE 0x00000080L
-#define ATOM_S3_CV_ACTIVE 0x00000100L
-#define ATOM_S3_DFP3_ACTIVE 0x00000200L
-#define ATOM_S3_DFP4_ACTIVE 0x00000400L
-#define ATOM_S3_DFP5_ACTIVE 0x00000800L
-
-
-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
-
-#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
-#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
-
-#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L
-#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L
-#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L
-#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
-#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
-#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
-#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
-#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
-#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
-#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
-#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
-#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
-
-
-#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
-#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
-//The two definitions below are not supported in pplib, but in the old powerplay in DAL
-#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
-#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-
-
-
-//Byte aligned definition for BIOS usage
-#define ATOM_S3_CRT1_ACTIVEb0 0x01
-#define ATOM_S3_LCD1_ACTIVEb0 0x02
-#define ATOM_S3_TV1_ACTIVEb0 0x04
-#define ATOM_S3_DFP1_ACTIVEb0 0x08
-#define ATOM_S3_CRT2_ACTIVEb0 0x10
-#define ATOM_S3_LCD2_ACTIVEb0 0x20
-#define ATOM_S3_DFP6_ACTIVEb0 0x40
-#define ATOM_S3_DFP2_ACTIVEb0 0x80
-#define ATOM_S3_CV_ACTIVEb1 0x01
-#define ATOM_S3_DFP3_ACTIVEb1 0x02
-#define ATOM_S3_DFP4_ACTIVEb1 0x04
-#define ATOM_S3_DFP5_ACTIVEb1 0x08
-
-
-#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
-
-#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01
-#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02
-#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04
-#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
-#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
-#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
-#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
-#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
-#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
-#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
-#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
-#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
-
-
-#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
-
-
-// BIOS_4_SCRATCH Definition
-#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
-#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
-#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-
-//Byte aligned definition for BIOS usage
-#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
-#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
-#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
-
-// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
-#define ATOM_S5_DOS_REQ_CRT1b0 0x01
-#define ATOM_S5_DOS_REQ_LCD1b0 0x02
-#define ATOM_S5_DOS_REQ_TV1b0 0x04
-#define ATOM_S5_DOS_REQ_DFP1b0 0x08
-#define ATOM_S5_DOS_REQ_CRT2b0 0x10
-#define ATOM_S5_DOS_REQ_LCD2b0 0x20
-#define ATOM_S5_DOS_REQ_DFP6b0 0x40
-#define ATOM_S5_DOS_REQ_DFP2b0 0x80
-#define ATOM_S5_DOS_REQ_CVb1 0x01
-#define ATOM_S5_DOS_REQ_DFP3b1 0x02
-#define ATOM_S5_DOS_REQ_DFP4b1 0x04
-#define ATOM_S5_DOS_REQ_DFP5b1 0x08
-
-
-#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
-
-#define ATOM_S5_DOS_REQ_CRT1 0x0001
-#define ATOM_S5_DOS_REQ_LCD1 0x0002
-#define ATOM_S5_DOS_REQ_TV1 0x0004
-#define ATOM_S5_DOS_REQ_DFP1 0x0008
-#define ATOM_S5_DOS_REQ_CRT2 0x0010
-#define ATOM_S5_DOS_REQ_LCD2 0x0020
-#define ATOM_S5_DOS_REQ_DFP6 0x0040
-#define ATOM_S5_DOS_REQ_DFP2 0x0080
-#define ATOM_S5_DOS_REQ_CV 0x0100
-#define ATOM_S5_DOS_REQ_DFP3 0x0200
-#define ATOM_S5_DOS_REQ_DFP4 0x0400
-#define ATOM_S5_DOS_REQ_DFP5 0x0800
-
-#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
-#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
-#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
-#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
-                                    (ATOM_S5_DOS_FORCE_CVb3<<8))
-// BIOS_6_SCRATCH Definition
-#define ATOM_S6_DEVICE_CHANGE 0x00000001L
-#define ATOM_S6_SCALER_CHANGE 0x00000002L
-#define ATOM_S6_LID_CHANGE 0x00000004L
-#define ATOM_S6_DOCKING_CHANGE 0x00000008L
-#define ATOM_S6_ACC_MODE 0x00000010L
-#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L
-#define ATOM_S6_LID_STATE 0x00000040L
-#define ATOM_S6_DOCK_STATE 0x00000080L
-#define ATOM_S6_CRITICAL_STATE 0x00000100L
-#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
-#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
-#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
-#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
-
-#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion
-#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion
-
-#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
-#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
-#define ATOM_S6_ACC_REQ_TV1 0x00040000L
-#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
-#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
-#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
-#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
-#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
-#define ATOM_S6_ACC_REQ_CV 0x01000000L
-#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
-#define ATOM_S6_ACC_REQ_DFP4 0x04000000L
-#define ATOM_S6_ACC_REQ_DFP5 0x08000000L
-
-#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L
-#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L
-#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L
-#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
-#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-
-//Byte aligned definition for BIOS usage
-#define ATOM_S6_DEVICE_CHANGEb0 0x01
-#define ATOM_S6_SCALER_CHANGEb0 0x02
-#define ATOM_S6_LID_CHANGEb0 0x04
-#define ATOM_S6_DOCKING_CHANGEb0 0x08
-#define ATOM_S6_ACC_MODEb0 0x10
-#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20
-#define ATOM_S6_LID_STATEb0 0x40
-#define ATOM_S6_DOCK_STATEb0 0x80
-#define ATOM_S6_CRITICAL_STATEb1 0x01
-#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
-#define
ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
-#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
-#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
-
-#define ATOM_S6_ACC_REQ_CRT1b2 0x01
-#define ATOM_S6_ACC_REQ_LCD1b2 0x02
-#define ATOM_S6_ACC_REQ_TV1b2 0x04
-#define ATOM_S6_ACC_REQ_DFP1b2 0x08
-#define ATOM_S6_ACC_REQ_CRT2b2 0x10
-#define ATOM_S6_ACC_REQ_LCD2b2 0x20
-#define ATOM_S6_ACC_REQ_DFP6b2 0x40
-#define ATOM_S6_ACC_REQ_DFP2b2 0x80
-#define ATOM_S6_ACC_REQ_CVb3 0x01
-#define ATOM_S6_ACC_REQ_DFP3b3 0x02
-#define ATOM_S6_ACC_REQ_DFP4b3 0x04
-#define ATOM_S6_ACC_REQ_DFP5b3 0x08
-
-#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
-#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
-#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
-#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40
-#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80
-
-#define ATOM_S6_DEVICE_CHANGE_SHIFT 0
-#define ATOM_S6_SCALER_CHANGE_SHIFT 1
-#define ATOM_S6_LID_CHANGE_SHIFT 2
-#define ATOM_S6_DOCKING_CHANGE_SHIFT 3
-#define ATOM_S6_ACC_MODE_SHIFT 4
-#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5
-#define ATOM_S6_LID_STATE_SHIFT 6
-#define ATOM_S6_DOCK_STATE_SHIFT 7
-#define ATOM_S6_CRITICAL_STATE_SHIFT 8
-#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9
-#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10
-#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11
-#define ATOM_S6_REQ_SCALER_SHIFT 12
-#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13
-#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14
-#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15
-#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28
-#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29
-#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
-#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
-
-// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
-#define ATOM_S7_DOS_MODE_TYPEb0 0x03
-#define ATOM_S7_DOS_MODE_VGAb0 0x00
-#define ATOM_S7_DOS_MODE_VESAb0 0x01
-#define ATOM_S7_DOS_MODE_EXTb0 0x02
-#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
-#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
-#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
-#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
-#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
-#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
-
-#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
-
-// BIOS_8_SCRATCH Definition
-#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
-#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
-
-#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
-#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
-
-// BIOS_9_SCRATCH Definition
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
-#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
-#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
-#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
-#endif
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
-#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
-#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
-#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
-#endif
-
-
-#define ATOM_FLAG_SET 0x20
-#define ATOM_FLAG_CLEAR 0
-#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
-#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
-#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
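These SET_/CLEAR_ values are not register masks; each packs a scratch-register selector (a *_INFO_DEF index), a bit position, and a set/clear flag into one token that scratch-manipulation code decodes. A minimal decode sketch:

    /* Decode a SET_/CLEAR_ token such as SET_ATOM_S6_CRITICAL_STATE. */
    unsigned int token    = SET_ATOM_S6_CRITICAL_STATE;
    unsigned int info_def = token >> 8;            /* ATOM_ACC_CHANGE_INFO_DEF (6)   */
    unsigned int is_set   = token & ATOM_FLAG_SET; /* 0x20 -> set, 0 -> clear        */
    unsigned int bit      = token & 0x1F;          /* ATOM_S6_CRITICAL_STATE_SHIFT   */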
-
-/****************************************************************************/
-//Portion II: Definitions only used in Driver
-/****************************************************************************/
-
-// Macros used by driver
-
-#ifdef __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
-
-#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
-#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
-#else // not __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
-
-#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
-#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
-#endif // __cplusplus
-
-#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
-#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
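GetIndexIntoMasterTable turns a field name of a master command/data table into a USHORT index by measuring the field's offset on a null pointer. A minimal usage sketch (it assumes the master data-table list declared elsewhere in this header has a FirmwareInfo member; the actual lookup into a BIOS image is driver code, not part of this header):

    /* Index of the FirmwareInfo entry inside the master data table. */
    int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

    /* A driver would then read the table offset at that index from the master
     * table in the BIOS image, and check its revision with
     * GET_DATA_TABLE_MAJOR_REVISION / GET_DATA_TABLE_MINOR_REVISION. */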
-
-/****************************************************************************/
-//Portion III: Definitions only used in VBIOS
-/****************************************************************************/
-#define ATOM_DAC_SRC 0x80
-#define ATOM_SRC_DAC1 0
-#define ATOM_SRC_DAC2 0x80
-
-
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS
-{
-  ULONG ulTargetMemoryClock; //In 10Khz unit
-  UCHAR ucAction;            //not defined yet
-  UCHAR ucFbDiv_Hi;          //Fbdiv Hi byte
-  UCHAR ucFbDiv;             //FB value
-  UCHAR ucPostDiv;           //Post div
-}MEMORY_PLLINIT_PARAMETERS;
-
-#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
-
-
-#define GPIO_PIN_WRITE 0x01
-#define GPIO_PIN_READ 0x00
-
-typedef struct _GPIO_PIN_CONTROL_PARAMETERS
-{
-  UCHAR ucGPIO_ID;      //return value, read from GPIO pins
-  UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal needs to be updated
-  UCHAR ucGPIOBitVal;   //Set/Reset corresponding bit defined in ucGPIOBitMask
-  UCHAR ucAction;       //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
-}GPIO_PIN_CONTROL_PARAMETERS;
-
-typedef struct _ENABLE_SCALER_PARAMETERS
-{
-  UCHAR ucScaler;     // ATOM_SCALER1, ATOM_SCALER2
-  UCHAR ucEnable;     // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
-  UCHAR ucTVStandard; //
-  UCHAR ucPadding[1];
-}ENABLE_SCALER_PARAMETERS;
-#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
-
-//ucEnable:
-#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
-#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
-#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
-#define SCALER_ENABLE_MULTITAP_MODE 3
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
-{
-  ULONG usHWIconHorzVertPosn; // Hardware Icon Horizontal/Vertical position
-  UCHAR ucHWIconVertOffset;   // Hardware Icon Vertical offset
-  UCHAR ucHWIconHorzOffset;   // Hardware Icon Horizontal offset
-  UCHAR ucSelection;          // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
-  UCHAR ucEnable;             // ATOM_ENABLE or ATOM_DISABLE
-}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
-{
-  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
-  ENABLE_CRTC_PARAMETERS sReserved;
-}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
-{
-  USHORT usHight;  // Image Height
-  USHORT usWidth;  // Image Width
-  UCHAR ucSurface; // Surface 1 or 2
-  UCHAR ucPadding[3];
-}ENABLE_GRAPH_SURFACE_PARAMETERS;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
-{
-  USHORT usHight;  // Image Height
-  USHORT usWidth;  // Image Width
-  UCHAR ucSurface; // Surface 1 or 2
-  UCHAR ucEnable;  // ATOM_ENABLE or ATOM_DISABLE
-  UCHAR ucPadding[2];
-}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
-{
-  USHORT usHight;    // Image Height
-  USHORT usWidth;    // Image Width
-  UCHAR ucSurface;   // Surface 1 or 2
-  UCHAR ucEnable;    // ATOM_ENABLE or ATOM_DISABLE
-  USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
-}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
-{
-  USHORT usHight;  // Image Height
-  USHORT usWidth;  // Image Width
-  USHORT usGraphPitch;
-  UCHAR ucColorDepth;
-  UCHAR ucPixelFormat;
-  UCHAR ucSurface; // Surface 1 or 2
-  UCHAR ucEnable;  // ATOM_ENABLE or ATOM_DISABLE
-  UCHAR ucModeType;
-  UCHAR ucReserved;
-}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
-
-// ucEnable
-#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
-#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
-
-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
-{
-  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
-  ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
-}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
-
-typedef struct _MEMORY_CLEAN_UP_PARAMETERS
-{
-  USHORT usMemoryStart; //on an 8Kb boundary, offset from memory base address
-  USHORT usMemorySize;  //8Kb blocks aligned
-}MEMORY_CLEAN_UP_PARAMETERS;
-
-#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
-
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
-{
-  USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
-  USHORT usY_Size;
-}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
-
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
-{
-  union{
-    USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
-    USHORT usSurface;
-  };
-  USHORT usY_Size;
-  USHORT usDispXStart;
-  USHORT usDispYStart;
-}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
-
-
-typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
-{
-  UCHAR ucLutId;
-  UCHAR ucAction;
-  USHORT usLutStartIndex;
-  USHORT usLutLength;
-  USHORT usLutOffsetInVram;
-}PALETTE_DATA_CONTROL_PARAMETERS_V3;
-
-// ucAction:
-#define PALETTE_DATA_AUTO_FILL 1
-#define PALETTE_DATA_READ 2
-#define PALETTE_DATA_WRITE 3
-
-
-typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
-{
-  UCHAR ucInterruptId;
-  UCHAR ucServiceId;
-  UCHAR ucStatus;
-  UCHAR ucReserved;
-}INTERRUPT_SERVICE_PARAMETER_V2;
-
-// ucInterruptId
-#define HDP1_INTERRUPT_ID 1
-#define HDP2_INTERRUPT_ID 2
-#define HDP3_INTERRUPT_ID 3
-#define HDP4_INTERRUPT_ID 4
-#define HDP5_INTERRUPT_ID 5
-#define HDP6_INTERRUPT_ID 6
-#define SW_INTERRUPT_ID 11
-
-// ucAction
-#define INTERRUPT_SERVICE_GEN_SW_INT 1
-#define INTERRUPT_SERVICE_GET_STATUS 2 - - // ucStatus -#define INTERRUPT_STATUS__INT_TRIGGER 1 -#define INTERRUPT_STATUS__HPD_HIGH 2 - -typedef struct _EFUSE_INPUT_PARAMETER -{ - USHORT usEfuseIndex; - UCHAR ucBitShift; - UCHAR ucBitLength; -}EFUSE_INPUT_PARAMETER; - -// ReadEfuseValue command table input/output parameter -typedef union _READ_EFUSE_VALUE_PARAMETER -{ - EFUSE_INPUT_PARAMETER sEfuse; - ULONG ulEfuseValue; -}READ_EFUSE_VALUE_PARAMETER; - -typedef struct _INDIRECT_IO_ACCESS -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR IOAccessSequence[256]; -} INDIRECT_IO_ACCESS; - -#define INDIRECT_READ 0x00 -#define INDIRECT_WRITE 0x80 - -#define INDIRECT_IO_MM 0 -#define INDIRECT_IO_PLL 1 -#define INDIRECT_IO_MC 2 -#define INDIRECT_IO_PCIE 3 -#define INDIRECT_IO_PCIEP 4 -#define INDIRECT_IO_NBMISC 5 -#define INDIRECT_IO_SMU 5 - -#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ -#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE -#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ -#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE -#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ -#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE -#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ -#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE -#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ -#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE -#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ -#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE - - -typedef struct _ATOM_OEM_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; -}ATOM_OEM_INFO; - -typedef struct _ATOM_TV_MODE -{ - UCHAR ucVMode_Num; //Video mode number - UCHAR ucTV_Mode_Num; //Internal TV mode number -}ATOM_TV_MODE; - -typedef struct _ATOM_BIOS_INT_TVSTD_MODE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table - USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table - USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table - USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table - USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table -}ATOM_BIOS_INT_TVSTD_MODE; - - -typedef struct _ATOM_TV_MODE_SCALER_PTR -{ - USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients - USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients - UCHAR ucTV_Mode_Num; -}ATOM_TV_MODE_SCALER_PTR; - -typedef struct _ATOM_STANDARD_VESA_TIMING -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation -}ATOM_STANDARD_VESA_TIMING; - - -typedef struct _ATOM_STD_FORMAT -{ - USHORT usSTD_HDisp; - USHORT usSTD_VDisp; - USHORT usSTD_RefreshRate; - USHORT usReserved; -}ATOM_STD_FORMAT; - -typedef struct _ATOM_VESA_TO_EXTENDED_MODE -{ - USHORT usVESA_ModeNumber; - USHORT usExtendedModeNumber; -}ATOM_VESA_TO_EXTENDED_MODE; - -typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; -}ATOM_VESA_TO_INTENAL_MODE_LUT; - -/*************** ATOM Memory Related Data Structure ***********************/ -typedef struct _ATOM_MEMORY_VENDOR_BLOCK{ - UCHAR ucMemoryType; - UCHAR ucMemoryVendor; - UCHAR ucAdjMCId; - UCHAR ucDynClkId; - ULONG ulDllResetClkRange; -}ATOM_MEMORY_VENDOR_BLOCK; - - -typedef struct 
_ATOM_MEMORY_SETTING_ID_CONFIG{ -#if ATOM_BIG_ENDIAN - ULONG ucMemBlkId:8; - ULONG ulMemClockRange:24; -#else - ULONG ulMemClockRange:24; - ULONG ucMemBlkId:8; -#endif -}ATOM_MEMORY_SETTING_ID_CONFIG; - -typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS -{ - ATOM_MEMORY_SETTING_ID_CONFIG slAccess; - ULONG ulAccess; -}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; - - -typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ - ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; - ULONG aulMemData[1]; -}ATOM_MEMORY_SETTING_DATA_BLOCK; - - -typedef struct _ATOM_INIT_REG_INDEX_FORMAT{ - USHORT usRegIndex; // MC register index - UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf -}ATOM_INIT_REG_INDEX_FORMAT; - - -typedef struct _ATOM_INIT_REG_BLOCK{ - USHORT usRegIndexTblSize; //size of asRegIndexBuf - USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK - ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; - ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; -}ATOM_INIT_REG_BLOCK; - -#define END_OF_REG_INDEX_BLOCK 0x0ffff -#define END_OF_REG_DATA_BLOCK 0x00000000 -#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS -#define CLOCK_RANGE_HIGHEST 0x00ffffff - -#define VALUE_DWORD SIZEOF ULONG -#define VALUE_SAME_AS_ABOVE 0 -#define VALUE_MASK_DWORD 0x84 - -#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) -#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) -#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) -//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code -#define ACCESS_PLACEHOLDER 0x80 - - -typedef struct _ATOM_MC_INIT_PARAM_TABLE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usAdjustARB_SEQDataOffset; - USHORT usMCInitMemTypeTblOffset; - USHORT usMCInitCommonTblOffset; - USHORT usMCInitPowerDownTblOffset; - ULONG ulARB_SEQDataBuf[32]; - ATOM_INIT_REG_BLOCK asMCInitMemType; - ATOM_INIT_REG_BLOCK asMCInitCommon; -}ATOM_MC_INIT_PARAM_TABLE; - - -typedef struct _ATOM_REG_INIT_SETTING -{ - USHORT usRegIndex; - ULONG ulRegValue; -}ATOM_REG_INIT_SETTING; - -typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulMCUcodeVersion; - ULONG ulMCUcodeRomStartAddr; - ULONG ulMCUcodeLength; - USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings. 
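ATOM_MEMORY_SETTING_ID_CONFIG above packs a block ID into the top byte and a 24-bit clock range into the rest; the two bit-field orders under ATOM_BIG_ENDIAN exist because bit-field layout is compiler- and endianness-dependent. A layout-independent sketch of the same packing using shifts (the helper name and sample values are mine; only CLOCK_RANGE_HIGHEST comes from the header):

#include <assert.h>
#include <stdint.h>

#define CLOCK_RANGE_HIGHEST 0x00ffffff /* 24-bit clock-range field saturated */

/* Pack an 8-bit memory-block ID with a clock range in 10 kHz units. */
static uint32_t pack_mem_setting_id(uint8_t blk_id, uint32_t clk_range_10khz)
{
    return ((uint32_t)blk_id << 24) | (clk_range_10khz & CLOCK_RANGE_HIGHEST);
}

int main(void)
{
    uint32_t id = pack_mem_setting_id(2, 90000); /* block 2, up to 900 MHz */
    assert((id >> 24) == 2);
    assert((id & CLOCK_RANGE_HIGHEST) == 90000);
    return 0;
}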
- USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY register setting
-}ATOM_MC_INIT_PARAM_TABLE_V2_1;
-
-
-#define _4Mx16 0x2
-#define _4Mx32 0x3
-#define _8Mx16 0x12
-#define _8Mx32 0x13
-#define _8Mx128 0x15
-#define _16Mx16 0x22
-#define _16Mx32 0x23
-#define _16Mx128 0x25
-#define _32Mx16 0x32
-#define _32Mx32 0x33
-#define _32Mx128 0x35
-#define _64Mx32 0x43
-#define _64Mx8 0x41
-#define _64Mx16 0x42
-#define _128Mx8 0x51
-#define _128Mx16 0x52
-#define _128Mx32 0x53
-#define _256Mx8 0x61
-#define _256Mx16 0x62
-#define _512Mx8 0x71
-
-
-#define SAMSUNG 0x1
-#define INFINEON 0x2
-#define ELPIDA 0x3
-#define ETRON 0x4
-#define NANYA 0x5
-#define HYNIX 0x6
-#define MOSEL 0x7
-#define WINBOND 0x8
-#define ESMT 0x9
-#define MICRON 0xF
-
-#define QIMONDA INFINEON
-#define PROMOS MOSEL
-#define KRETON INFINEON
-#define ELIXIR NANYA
-#define MEZZA ELPIDA
-
-
-/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
-
-#define UCODE_ROM_START_ADDRESS 0x1b800
-#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
-
-//uCode block header for reference
-
-typedef struct _MCuCodeHeader
-{
- ULONG ulSignature;
- UCHAR ucRevision;
- UCHAR ucChecksum;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
- USHORT usParametersLength;
- USHORT usUCodeLength;
- USHORT usReserved1;
- USHORT usReserved2;
-} MCuCodeHeader;
-
-//////////////////////////////////////////////////////////////////////////////////
-
-#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
-
-#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
-typedef struct _ATOM_VRAM_MODULE_V1
-{
- ULONG ulReserved;
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
- UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
- UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor
- UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
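The density codes above appear to encode the device's word count in the high nibble (4M shifted left by that value) and the device width in the low nibble (x4 shifted likewise), so _64Mx8 = 0x41 reads as 4M<<4 = 64M words of 4<<1 = 8 bits. A small decoder sketch under that reading (the helper name and unit convention are mine):

#include <stdint.h>
#include <stdio.h>

#define _16Mx32 0x23 /* from the table above */

/* Decode a density code into millions of words and device bit width. */
static void decode_density(uint8_t code, unsigned *mwords, unsigned *width)
{
    *mwords = 4u << (code >> 4);  /* high nibble: 4M << n words */
    *width  = 4u << (code & 0xF); /* low nibble: x(4 << n) */
}

int main(void)
{
    unsigned mwords, width;
    decode_density(_16Mx32, &mwords, &width);
    printf("%uMx%u\n", mwords, width); /* prints 16Mx32 */
    return 0;
}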
- UCHAR ucRow; // Number of Rows, in power of 2;
- UCHAR ucColumn; // Number of Columns, in power of 2;
- UCHAR ucBank; // Number of Banks;
- UCHAR ucRank; // Number of Ranks, in power of 2
- UCHAR ucChannelNum; // Number of channels;
- UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
- UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
- UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
- UCHAR ucReserved[2];
-}ATOM_VRAM_MODULE_V1;
-
-
-typedef struct _ATOM_VRAM_MODULE_V2
-{
- ULONG ulReserved;
- ULONG ulFlags; // To enable/disable functionalities based on memory type
- ULONG ulEngineClock; // Override of default engine clock for particular memory type
- ULONG ulMemoryClock; // Override of default memory clock for particular memory type
- USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
- USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
- UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
- UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed
- UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
- UCHAR ucRow; // Number of Rows, in power of 2;
- UCHAR ucColumn; // Number of Columns, in power of 2;
- UCHAR ucBank; // Number of Banks;
- UCHAR ucRank; // Number of Ranks, in power of 2
- UCHAR ucChannelNum; // Number of channels;
- UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
- UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
- UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
- UCHAR ucRefreshRateFactor;
- UCHAR ucReserved[3];
-}ATOM_VRAM_MODULE_V2;
-
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT
-{
- ULONG ulClkRange; // memory clock in 10kHz unit; when the target memory clock is below this clock, use this memory timing
- union{
- USHORT usMRS; // mode register
- USHORT usDDR3_MR0;
- };
- union{
- USHORT usEMRS; // extended mode register
- USHORT usDDR3_MR1;
- };
- UCHAR ucCL; // CAS latency
- UCHAR ucWL; // WRITE Latency
- UCHAR uctRAS; // tRAS
- UCHAR uctRC; // tRC
- UCHAR uctRFC; // tRFC
- UCHAR uctRCDR; // tRCDR
- UCHAR uctRCDW; // tRCDW
- UCHAR uctRP; // tRP
- UCHAR uctRRD; // tRRD
- UCHAR uctWR; // tWR
- UCHAR uctWTR; // tWTR
- UCHAR uctPDIX; // tPDIX
- UCHAR uctFAW; // tFAW
- UCHAR uctAOND; // tAOND
- union
- {
- struct {
- UCHAR ucflag; // flag to control memory timing calculation.
bit0= control EMRS2 Infineon - UCHAR ucReserved; - }; - USHORT usDDR3_MR2; - }; -}ATOM_MEMORY_TIMING_FORMAT; - - -typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 -{ - ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing - USHORT usMRS; // mode register - USHORT usEMRS; // extended mode register - UCHAR ucCL; // CAS latency - UCHAR ucWL; // WRITE Latency - UCHAR uctRAS; // tRAS - UCHAR uctRC; // tRC - UCHAR uctRFC; // tRFC - UCHAR uctRCDR; // tRCDR - UCHAR uctRCDW; // tRCDW - UCHAR uctRP; // tRP - UCHAR uctRRD; // tRRD - UCHAR uctWR; // tWR - UCHAR uctWTR; // tWTR - UCHAR uctPDIX; // tPDIX - UCHAR uctFAW; // tFAW - UCHAR uctAOND; // tAOND - UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon -////////////////////////////////////GDDR parameters/////////////////////////////////// - UCHAR uctCCDL; // - UCHAR uctCRCRL; // - UCHAR uctCRCWL; // - UCHAR uctCKE; // - UCHAR uctCKRSE; // - UCHAR uctCKRSX; // - UCHAR uctFAW32; // - UCHAR ucMR5lo; // - UCHAR ucMR5hi; // - UCHAR ucTerminator; -}ATOM_MEMORY_TIMING_FORMAT_V1; - - - - -typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2 -{ - ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing - USHORT usMRS; // mode register - USHORT usEMRS; // extended mode register - UCHAR ucCL; // CAS latency - UCHAR ucWL; // WRITE Latency - UCHAR uctRAS; // tRAS - UCHAR uctRC; // tRC - UCHAR uctRFC; // tRFC - UCHAR uctRCDR; // tRCDR - UCHAR uctRCDW; // tRCDW - UCHAR uctRP; // tRP - UCHAR uctRRD; // tRRD - UCHAR uctWR; // tWR - UCHAR uctWTR; // tWTR - UCHAR uctPDIX; // tPDIX - UCHAR uctFAW; // tFAW - UCHAR uctAOND; // tAOND - UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon -////////////////////////////////////GDDR parameters/////////////////////////////////// - UCHAR uctCCDL; // - UCHAR uctCRCRL; // - UCHAR uctCRCWL; // - UCHAR uctCKE; // - UCHAR uctCKRSE; // - UCHAR uctCKRSX; // - UCHAR uctFAW32; // - UCHAR ucMR4lo; // - UCHAR ucMR4hi; // - UCHAR ucMR5lo; // - UCHAR ucMR5hi; // - UCHAR ucTerminator; - UCHAR ucReserved; -}ATOM_MEMORY_TIMING_FORMAT_V2; - - -typedef struct _ATOM_MEMORY_FORMAT -{ - ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock - union{ - USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type - USHORT usDDR3_Reserved; // Not used for DDR3 memory - }; - union{ - USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type - USHORT usDDR3_MR3; // Used for DDR3 memory - }; - UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; - UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed
- UCHAR ucRow; // Number of Rows, in power of 2;
- UCHAR ucColumn; // Number of Columns, in power of 2;
- UCHAR ucBank; // Number of Banks;
- UCHAR ucRank; // Number of Ranks, in power of 2
- UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
- UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
- UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
- UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
- UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
- UCHAR ucMemAttrib; // Memory Device Attribute, like RDBI/WDBI etc
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; // Memory Timing blocks sorted from lower clock to higher clock
-}ATOM_MEMORY_FORMAT;
-
-
-typedef struct _ATOM_VRAM_MODULE_V3
-{
- ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
- USHORT usSize; // size of ATOM_VRAM_MODULE_V3
- USHORT usDefaultMVDDQ; // board dependent parameter: Default Memory Core Voltage
- USHORT usDefaultMVDDC; // board dependent parameter: Default Memory IO Voltage
- UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
- UCHAR ucChannelNum; // board dependent parameter: Number of channels
- UCHAR ucChannelSize; // board dependent parameter: 32bit or 64bit
- UCHAR ucVREFI; // board dependent parameter: EXT or INT +160mv to -140mv
- UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
- UCHAR ucFlag; // To enable/disable functionalities based on memory type
- ATOM_MEMORY_FORMAT asMemory; // describes all of the video memory parameters from the memory spec
-}ATOM_VRAM_MODULE_V3;
-
-
-//ATOM_VRAM_MODULE_V3.ucNPL_RT
-#define NPL_RT_MASK 0x0f
-#define BATTERY_ODT_MASK 0xc0
-
-#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
-
-typedef struct _ATOM_VRAM_MODULE_V4
-{
- ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
- USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
- USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
- // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
- USHORT usReserved;
- UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
- UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
- UCHAR ucChannelNum; // Number of channels present in this module config
- UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
- UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
- UCHAR ucFlag; // To enable/disable functionalities based on memory type
- UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
- UCHAR ucVREFI; // board dependent parameter
- UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
- UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
- UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
- // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros - UCHAR ucReserved[3]; - -//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level - union{ - USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type - USHORT usDDR3_Reserved; - }; - union{ - USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type - USHORT usDDR3_MR3; // Used for DDR3 memory - }; - UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed - UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - UCHAR ucReserved2[2]; - ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock -}ATOM_VRAM_MODULE_V4; - -#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3 -#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1 -#define VRAM_MODULE_V4_MISC_BL_MASK 0x4 -#define VRAM_MODULE_V4_MISC_BL8 0x4 -#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10 - -typedef struct _ATOM_VRAM_MODULE_V5 -{ - ULONG ulChannelMapCfg; // board dependent parameter: Channel combination - USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE - USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! - // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) - USHORT usReserved; - UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module - UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; - UCHAR ucChannelNum; // Number of channels present in this module config - UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits - UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 - UCHAR ucFlag; // To enable/disable functionalities based on memory type - UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 - UCHAR ucVREFI; // board dependent parameter - UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters - UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble - UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
- // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros - UCHAR ucReserved[3]; - -//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level - USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type - USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type - UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed - UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth - UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth - ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock -}ATOM_VRAM_MODULE_V5; - - -typedef struct _ATOM_VRAM_MODULE_V6 -{ - ULONG ulChannelMapCfg; // board dependent parameter: Channel combination - USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE - USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! - // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) - USHORT usReserved; - UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module - UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; - UCHAR ucChannelNum; // Number of channels present in this module config - UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits - UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 - UCHAR ucFlag; // To enable/disable functionalities based on memory type - UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 - UCHAR ucVREFI; // board dependent parameter - UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters - UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble - UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
- // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros - UCHAR ucReserved[3]; - -//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level - USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type - USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type - UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed - UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth - UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth - ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock -}ATOM_VRAM_MODULE_V6; - -typedef struct _ATOM_VRAM_MODULE_V7 -{ -// Design Specific Values - ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP - USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 - USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) - USHORT usEnableChannels; // bit vector which indicate which channels are enabled - UCHAR ucExtMemoryID; // Current memory module ID - UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 - UCHAR ucChannelNum; // Number of mem. channels supported in this module - UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT - UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 - UCHAR ucReserve; // In MC7x, the lower 4 bits are used as bit8-11 of memory size. In other MC code, it's not used. - UCHAR ucMisc; // RANK_OF_THISMEMORY etc. - UCHAR ucVREFI; // Not used. - UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2. - UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble - UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros - USHORT usSEQSettingOffset; - UCHAR ucReserved; -// Memory Module specific values - USHORT usEMRS2Value; // EMRS2/MR2 Value. - USHORT usEMRS3Value; // EMRS3/MR3 Value. - UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code - UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory - UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth - char strMemPNString[20]; // part number end with '0'. -}ATOM_VRAM_MODULE_V7; - - -typedef struct _ATOM_VRAM_MODULE_V8 -{ -// Design Specific Values - ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP - USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 - USHORT usMcRamCfg; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) - USHORT usEnableChannels; // bit vector which indicate which channels are enabled - UCHAR ucExtMemoryID; // Current memory module ID - UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 - UCHAR ucChannelNum; // Number of mem. channels supported in this module - UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT - UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 - UCHAR ucBankCol; // bit[3:2]= BANK ( =2:16bank, =1:8bank, =0:4bank ) bit[1:0]=Col ( =2: 10 bit, =1:9bit, =0:8bit ) - UCHAR ucMisc; // RANK_OF_THISMEMORY etc. - UCHAR ucVREFI; // Not used. 
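Because every entry from ATOM_VRAM_MODULE_V4 on leads with ulChannelMapCfg followed by usModuleSize, a parser can hop across the variable-sized aVramInfo[] array without knowing each revision's full layout. A sketch of that walk, assuming the 1-byte packing these tables use and a little-endian size word at byte offset 4 (the helper names and the synthetic buffer are mine):

#include <stdint.h>

/* usModuleSize sits 4 bytes in, after the ULONG ulChannelMapCfg. */
static uint16_t read_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

/* Return a pointer to the idx-th VRAM module entry inside aVramInfo[].
 * Bounds checks against ucNumOfVRAMModule are omitted for brevity. */
static const uint8_t *vram_module_at(const uint8_t *first, unsigned idx)
{
    const uint8_t *p = first;
    while (idx--)
        p += read_le16(p + 4); /* usModuleSize of the current entry */
    return p;
}

int main(void)
{
    /* Two fake entries, 12 and 16 bytes long. */
    uint8_t blob[28] = {0};
    blob[4] = 12;      /* entry 0: usModuleSize = 12 */
    blob[12 + 4] = 16; /* entry 1: usModuleSize = 16 */
    return vram_module_at(blob, 1) == blob + 12 ? 0 : 1;
}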
- USHORT usReserved; // Not used - USHORT usMemorySize; // Total memory size in unit of MB for CONFIG_MEMSIZE zeros - UCHAR ucMcTunningSetId; // MC phy registers set per. - UCHAR ucRowNum; -// Memory Module specific values - USHORT usEMRS2Value; // EMRS2/MR2 Value. - USHORT usEMRS3Value; // EMRS3/MR3 Value. - UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code - UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory - UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth - - ULONG ulChannelMapCfg1; // channel mapping for channel8~15 - ULONG ulBankMapCfg; - ULONG ulReserved; - char strMemPNString[20]; // part number end with '0'. -}ATOM_VRAM_MODULE_V8; - - -typedef struct _ATOM_VRAM_INFO_V2 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; -}ATOM_VRAM_INFO_V2; - -typedef struct _ATOM_VRAM_INFO_V3 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting - USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting - USHORT usRerseved; - UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; - ATOM_INIT_REG_BLOCK asMemPatch; // for allocation - -}ATOM_VRAM_INFO_V3; - -#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3 - -typedef struct _ATOM_VRAM_INFO_V4 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting - USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting - USHORT usRerseved; - UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 - ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... 
DQ7:[23:21] - UCHAR ucReservde[4]; - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; - ATOM_INIT_REG_BLOCK asMemPatch; // for allocation -}ATOM_VRAM_INFO_V4; - -typedef struct _ATOM_VRAM_INFO_HEADER_V2_1 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting - USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting - USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings - USHORT usReserved[3]; - UCHAR ucNumOfVRAMModule; // indicate number of VRAM module - UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list - UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version - UCHAR ucReserved; - ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; -}ATOM_VRAM_INFO_HEADER_V2_1; - -typedef struct _ATOM_VRAM_INFO_HEADER_V2_2 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting - USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting - USHORT usMcAdjustPerTileTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings - USHORT usMcPhyInitTableOffset; // offset of ATOM_INIT_REG_BLOCK structure for MC phy init set - USHORT usDramDataRemapTblOffset; // offset of ATOM_DRAM_DATA_REMAP array to indicate DRAM data lane to GPU mapping - USHORT usReserved1; - UCHAR ucNumOfVRAMModule; // indicate number of VRAM module - UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list - UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version - UCHAR ucMcPhyTileNum; // indicate the MCD tile number which use in DramDataRemapTbl and usMcAdjustPerTileTblOffset - ATOM_VRAM_MODULE_V8 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; -}ATOM_VRAM_INFO_HEADER_V2_2; - - -typedef struct _ATOM_DRAM_DATA_REMAP -{ - UCHAR ucByteRemapCh0; - UCHAR ucByteRemapCh1; - ULONG ulByte0BitRemapCh0; - ULONG ulByte1BitRemapCh0; - ULONG ulByte2BitRemapCh0; - ULONG ulByte3BitRemapCh0; - ULONG ulByte0BitRemapCh1; - ULONG ulByte1BitRemapCh1; - ULONG ulByte2BitRemapCh1; - ULONG ulByte3BitRemapCh1; -}ATOM_DRAM_DATA_REMAP; - -typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator -}ATOM_VRAM_GPIO_DETECTION_INFO; - - -typedef struct _ATOM_MEMORY_TRAINING_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucTrainingLoop; - UCHAR ucReserved[3]; - ATOM_INIT_REG_BLOCK asMemTrainingSetting; -}ATOM_MEMORY_TRAINING_INFO; - - -typedef struct SW_I2C_CNTL_DATA_PARAMETERS -{ - UCHAR ucControl; - UCHAR ucData; - UCHAR ucSatus; - UCHAR ucTemp; -} SW_I2C_CNTL_DATA_PARAMETERS; - -#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS - -typedef struct _SW_I2C_IO_DATA_PARAMETERS -{ - USHORT GPIO_Info; - UCHAR ucAct; - UCHAR ucData; - } SW_I2C_IO_DATA_PARAMETERS; - -#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS - -/****************************SW I2C CNTL DEFINITIONS**********************/ -#define SW_I2C_IO_RESET 0 -#define SW_I2C_IO_GET 1 -#define 
SW_I2C_IO_DRIVE 2 -#define SW_I2C_IO_SET 3 -#define SW_I2C_IO_START 4 - -#define SW_I2C_IO_CLOCK 0 -#define SW_I2C_IO_DATA 0x80 - -#define SW_I2C_IO_ZERO 0 -#define SW_I2C_IO_ONE 0x100 - -#define SW_I2C_CNTL_READ 0 -#define SW_I2C_CNTL_WRITE 1 -#define SW_I2C_CNTL_START 2 -#define SW_I2C_CNTL_STOP 3 -#define SW_I2C_CNTL_OPEN 4 -#define SW_I2C_CNTL_CLOSE 5 -#define SW_I2C_CNTL_WRITE1BIT 6 - -//==============================VESA definition Portion=============================== -#define VESA_OEM_PRODUCT_REV '01.00' -#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support -#define VESA_MODE_WIN_ATTRIBUTE 7 -#define VESA_WIN_SIZE 64 - -typedef struct _PTR_32_BIT_STRUCTURE -{ - USHORT Offset16; - USHORT Segment16; -} PTR_32_BIT_STRUCTURE; - -typedef union _PTR_32_BIT_UNION -{ - PTR_32_BIT_STRUCTURE SegmentOffset; - ULONG Ptr32_Bit; -} PTR_32_BIT_UNION; - -typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE -{ - UCHAR VbeSignature[4]; - USHORT VbeVersion; - PTR_32_BIT_UNION OemStringPtr; - UCHAR Capabilities[4]; - PTR_32_BIT_UNION VideoModePtr; - USHORT TotalMemory; -} VBE_1_2_INFO_BLOCK_UPDATABLE; - - -typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE -{ - VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; - USHORT OemSoftRev; - PTR_32_BIT_UNION OemVendorNamePtr; - PTR_32_BIT_UNION OemProductNamePtr; - PTR_32_BIT_UNION OemProductRevPtr; -} VBE_2_0_INFO_BLOCK_UPDATABLE; - -typedef union _VBE_VERSION_UNION -{ - VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; - VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; -} VBE_VERSION_UNION; - -typedef struct _VBE_INFO_BLOCK -{ - VBE_VERSION_UNION UpdatableVBE_Info; - UCHAR Reserved[222]; - UCHAR OemData[256]; -} VBE_INFO_BLOCK; - -typedef struct _VBE_FP_INFO -{ - USHORT HSize; - USHORT VSize; - USHORT FPType; - UCHAR RedBPP; - UCHAR GreenBPP; - UCHAR BlueBPP; - UCHAR ReservedBPP; - ULONG RsvdOffScrnMemSize; - ULONG RsvdOffScrnMEmPtr; - UCHAR Reserved[14]; -} VBE_FP_INFO; - -typedef struct _VESA_MODE_INFO_BLOCK -{ -// Mandatory information for all VBE revisions - USHORT ModeAttributes; // dw ? ; mode attributes - UCHAR WinAAttributes; // db ? ; window A attributes - UCHAR WinBAttributes; // db ? ; window B attributes - USHORT WinGranularity; // dw ? ; window granularity - USHORT WinSize; // dw ? ; window size - USHORT WinASegment; // dw ? ; window A start segment - USHORT WinBSegment; // dw ? ; window B start segment - ULONG WinFuncPtr; // dd ? ; real mode pointer to window function - USHORT BytesPerScanLine;// dw ? ; bytes per scan line - -//; Mandatory information for VBE 1.2 and above - USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters - USHORT YResolution; // dw ? ; vertical resolution in pixels or characters - UCHAR XCharSize; // db ? ; character cell width in pixels - UCHAR YCharSize; // db ? ; character cell height in pixels - UCHAR NumberOfPlanes; // db ? ; number of memory planes - UCHAR BitsPerPixel; // db ? ; bits per pixel - UCHAR NumberOfBanks; // db ? ; number of banks - UCHAR MemoryModel; // db ? ; memory model type - UCHAR BankSize; // db ? ; bank size in KB - UCHAR NumberOfImagePages;// db ? ; number of images - UCHAR ReservedForPageFunction;//db 1 ; reserved for page function - -//; Direct Color fields(required for direct/6 and YUV/7 memory models) - UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits - UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask - UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits - UCHAR GreenFieldPosition; // db ? 
; bit position of lsb of green mask - UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits - UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask - UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits - UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask - UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes - -//; Mandatory information for VBE 2.0 and above - ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer - ULONG Reserved_1; // dd 0 ; reserved - always set to 0 - USHORT Reserved_2; // dw 0 ; reserved - always set to 0 - -//; Mandatory information for VBE 3.0 and above - USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes - UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes - UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes - UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes) - UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes) - UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes) - UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes) - UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes) - UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes) - UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes) - UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes) - ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode - UCHAR Reserved; // db 190 dup (0) -} VESA_MODE_INFO_BLOCK; - -// BIOS function CALLS -#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code -#define ATOM_BIOS_FUNCTION_COP_MODE 0x00 -#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04 -#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05 -#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06 -#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B -#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E -#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F -#define ATOM_BIOS_FUNCTION_STV_STD 0x16 -#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17 -#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18 - -#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 -#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 -#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 -#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A -#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B -#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80 -#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80 - -#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D -#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E -#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F -#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03 -#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7 -#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state -#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state -#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85 -#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89 -#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported - - -#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS -#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01 -#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02 
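These DPMS codes follow the classic VBE INT 10h register convention: AX carries the 0x4F10 function, BL the sub-function, and BH the parameter, which is why the parameter values below are pre-shifted by 8. A sketch of composing the BX register value (pure arithmetic, no real BIOS call; the three defines are repeated from this table for self-containment):

#include <stdint.h>
#include <stdio.h>

#define ATOM_BIOS_FUNCTION_VESA_DPMS     0x4F10 /* AX */
#define ATOM_SUB_FUNCTION_SET_DPMS       0x0001 /* BL */
#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH, already shifted */

int main(void)
{
    /* BH|BL combined: 0x0100 | 0x0001 == 0x0101, i.e. "set DPMS standby". */
    uint16_t bx = ATOM_PARAMETER_VESA_DPMS_STANDBY | ATOM_SUB_FUNCTION_SET_DPMS;
    printf("AX=%04X BX=%04X\n", ATOM_BIOS_FUNCTION_VESA_DPMS, bx);
    return 0;
}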
-#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON. -#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY -#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND -#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF -#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) - -#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L -#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L -#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL - -// structure used for VBIOS only - -//DispOutInfoTable -typedef struct _ASIC_TRANSMITTER_INFO -{ - USHORT usTransmitterObjId; - USHORT usSupportDevice; - UCHAR ucTransmitterCmdTblId; - UCHAR ucConfig; - UCHAR ucEncoderID; //available 1st encoder ( default ) - UCHAR ucOptionEncoderID; //available 2nd encoder ( optional ) - UCHAR uc2ndEncoderID; - UCHAR ucReserved; -}ASIC_TRANSMITTER_INFO; - -#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01 -#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80 -#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84 - -typedef struct _ASIC_ENCODER_INFO -{ - UCHAR ucEncoderID; - UCHAR ucEncoderConfig; - USHORT usEncoderCmdTblId; -}ASIC_ENCODER_INFO; - -typedef struct _ATOM_DISP_OUT_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT ptrTransmitterInfo; - USHORT ptrEncoderInfo; - ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; - ASIC_ENCODER_INFO asEncoderInfo[1]; -}ATOM_DISP_OUT_INFO; - - -typedef struct _ATOM_DISP_OUT_INFO_V2 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT ptrTransmitterInfo; - USHORT ptrEncoderInfo; - USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. - ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; - ASIC_ENCODER_INFO asEncoderInfo[1]; -}ATOM_DISP_OUT_INFO_V2; - - -typedef struct _ATOM_DISP_CLOCK_ID { - UCHAR ucPpllId; - UCHAR ucPpllAttribute; -}ATOM_DISP_CLOCK_ID; - -// ucPpllAttribute -#define CLOCK_SOURCE_SHAREABLE 0x01 -#define CLOCK_SOURCE_DP_MODE 0x02 -#define CLOCK_SOURCE_NONE_DP_MODE 0x04 - -//DispOutInfoTable -typedef struct _ASIC_TRANSMITTER_INFO_V2 -{ - USHORT usTransmitterObjId; - USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object - UCHAR ucTransmitterCmdTblId; - UCHAR ucConfig; - UCHAR ucEncoderID; // available 1st encoder ( default ) - UCHAR ucOptionEncoderID; // available 2nd encoder ( optional ) - UCHAR uc2ndEncoderID; - UCHAR ucReserved; -}ASIC_TRANSMITTER_INFO_V2; - -typedef struct _ATOM_DISP_OUT_INFO_V3 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT ptrTransmitterInfo; - USHORT ptrEncoderInfo; - USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. 
- USHORT usReserved; - UCHAR ucDCERevision; - UCHAR ucMaxDispEngineNum; - UCHAR ucMaxActiveDispEngineNum; - UCHAR ucMaxPPLLNum; - UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE - UCHAR ucDispCaps; - UCHAR ucReserved[2]; - ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only -}ATOM_DISP_OUT_INFO_V3; - -//ucDispCaps -#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL 0x01 -#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED 0x02 - -typedef enum CORE_REF_CLK_SOURCE{ - CLOCK_SRC_XTALIN=0, - CLOCK_SRC_XO_IN=1, - CLOCK_SRC_XO_IN2=2, -}CORE_REF_CLK_SOURCE; - -// DispDevicePriorityInfo -typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT asDevicePriority[16]; -}ATOM_DISPLAY_DEVICE_PRIORITY_INFO; - -//ProcessAuxChannelTransactionTable -typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS -{ - USHORT lpAuxRequest; - USHORT lpDataOut; - UCHAR ucChannelID; - union - { - UCHAR ucReplyStatus; - UCHAR ucDelay; - }; - UCHAR ucDataOutLen; - UCHAR ucReserved; -}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; - -//ProcessAuxChannelTransactionTable -typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 -{ - USHORT lpAuxRequest; - USHORT lpDataOut; - UCHAR ucChannelID; - union - { - UCHAR ucReplyStatus; - UCHAR ucDelay; - }; - UCHAR ucDataOutLen; - UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6 -}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2; - -#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS - -//GetSinkType - -typedef struct _DP_ENCODER_SERVICE_PARAMETERS -{ - USHORT ucLinkClock; - union - { - UCHAR ucConfig; // for DP training command - UCHAR ucI2cId; // use for GET_SINK_TYPE command - }; - UCHAR ucAction; - UCHAR ucStatus; - UCHAR ucLaneNum; - UCHAR ucReserved[2]; -}DP_ENCODER_SERVICE_PARAMETERS; - -// ucAction -#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01 - -#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS - - -typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2 -{ - USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION - UCHAR ucAuxId; - UCHAR ucAction; - UCHAR ucSinkType; // Iput and Output parameters. 
- UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION - UCHAR ucReserved[2]; -}DP_ENCODER_SERVICE_PARAMETERS_V2; - -typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2 -{ - DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam; - PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam; -}DP_ENCODER_SERVICE_PS_ALLOCATION_V2; - -// ucAction -#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01 -#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02 - - -// DP_TRAINING_TABLE -#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR -#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) -#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 ) -#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 ) -#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) -#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) -#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) -#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60) -#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) -#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) -#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) -#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) -#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84) - - -typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS -{ - UCHAR ucI2CSpeed; - union - { - UCHAR ucRegIndex; - UCHAR ucStatus; - }; - USHORT lpI2CDataOut; - UCHAR ucFlag; - UCHAR ucTransBytes; - UCHAR ucSlaveAddr; - UCHAR ucLineNumber; -}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; - -#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS - -//ucFlag -#define HW_I2C_WRITE 1 -#define HW_I2C_READ 0 -#define I2C_2BYTE_ADDR 0x02 - -/****************************************************************************/ -// Structures used by HW_Misc_OperationTable -/****************************************************************************/ -typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 -{ - UCHAR ucCmd; // Input: To tell which action to take - UCHAR ucReserved[3]; - ULONG ulReserved; -}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; - -typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 -{ - UCHAR ucReturnCode; // Output: Return value base on action was taken - UCHAR ucReserved[3]; - ULONG ulReserved; -}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1; - -// Actions code -#define ATOM_GET_SDI_SUPPORT 0xF0 - -// Return code -#define ATOM_UNKNOWN_CMD 0 -#define ATOM_FEATURE_NOT_SUPPORTED 1 -#define ATOM_FEATURE_SUPPORTED 2 - -typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION -{ - ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output; - PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved; -}ATOM_HW_MISC_OPERATION_PS_ALLOCATION; - -/****************************************************************************/ - -typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 -{ - UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ... 
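ucHWBlkInst in SET_HWBLOCK_INSTANCE_PARAMETER_V2 packs the block selector into bits [7:4] and the instance into bits [2:0], per the HWBLKINST_* masks defined just below. A sketch of the encode step (the helper name is mine; the masks and SELECT_DCIO_UNIPHY_LINK1 value are from the defines that follow):

#include <assert.h>
#include <stdint.h>

#define HWBLKINST_INSTANCE_MASK  0x07
#define HWBLKINST_HWBLK_MASK     0xF0
#define HWBLKINST_HWBLK_SHIFT    0x04
#define SELECT_DCIO_UNIPHY_LINK1 3

/* Encode a block select ([7:4]) plus instance ([2:0]) into one byte. */
static uint8_t pack_hwblk_inst(uint8_t block, uint8_t instance)
{
    return (uint8_t)(((block << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
                     (instance & HWBLKINST_INSTANCE_MASK));
}

int main(void)
{
    assert(pack_hwblk_inst(SELECT_DCIO_UNIPHY_LINK1, 1) == 0x31);
    return 0;
}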
- UCHAR ucReserved[3]; -}SET_HWBLOCK_INSTANCE_PARAMETER_V2; - -#define HWBLKINST_INSTANCE_MASK 0x07 -#define HWBLKINST_HWBLK_MASK 0xF0 -#define HWBLKINST_HWBLK_SHIFT 0x04 - -//ucHWBlock -#define SELECT_DISP_ENGINE 0 -#define SELECT_DISP_PLL 1 -#define SELECT_DCIO_UNIPHY_LINK0 2 -#define SELECT_DCIO_UNIPHY_LINK1 3 -#define SELECT_DCIO_IMPCAL 4 -#define SELECT_DCIO_DIG 6 -#define SELECT_CRTC_PIXEL_RATE 7 -#define SELECT_VGA_BLK 8 - -// DIGTransmitterInfoTable structure used to program UNIPHY settings -typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock - USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info - USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range - USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info - USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings -}DIG_TRANSMITTER_INFO_HEADER_V3_1; - -typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock - USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info - USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range - USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info - USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings - USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info - USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings -}DIG_TRANSMITTER_INFO_HEADER_V3_2; - - -typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_3{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock - USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info - USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range - USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info - USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings - USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info - USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings - USHORT usEDPVsLegacyModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Legacy Mode Voltage Swing and Pre-Emphasis for each Link clock - USHORT useDPVsLowVdiffModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Low VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock - USHORT useDPVsHighVdiffModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP High VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock - USHORT useDPVsStretchModeOffset; // offset of 
PHY_ANALOG_SETTING_INFO * with eDP Stretch Mode Voltage Swing and Pre-Emphasis for each Link clock - USHORT useDPVsSingleVdiffModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vdiff Mode Voltage Swing and Pre-Emphasis for each Link clock - USHORT useDPVsVariablePremModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vidff+Variable PreEmphasis Voltage Swing and Pre-Emphasis for each Link clock -}DIG_TRANSMITTER_INFO_HEADER_V3_3; - - -typedef struct _CLOCK_CONDITION_REGESTER_INFO{ - USHORT usRegisterIndex; - UCHAR ucStartBit; - UCHAR ucEndBit; -}CLOCK_CONDITION_REGESTER_INFO; - -typedef struct _CLOCK_CONDITION_SETTING_ENTRY{ - USHORT usMaxClockFreq; - UCHAR ucEncodeMode; - UCHAR ucPhySel; - ULONG ulAnalogSetting[1]; -}CLOCK_CONDITION_SETTING_ENTRY; - -typedef struct _CLOCK_CONDITION_SETTING_INFO{ - USHORT usEntrySize; - CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1]; -}CLOCK_CONDITION_SETTING_INFO; - -typedef struct _PHY_CONDITION_REG_VAL{ - ULONG ulCondition; - ULONG ulRegVal; -}PHY_CONDITION_REG_VAL; - -typedef struct _PHY_CONDITION_REG_VAL_V2{ - ULONG ulCondition; - UCHAR ucCondition2; - ULONG ulRegVal; -}PHY_CONDITION_REG_VAL_V2; - -typedef struct _PHY_CONDITION_REG_INFO{ - USHORT usRegIndex; - USHORT usSize; - PHY_CONDITION_REG_VAL asRegVal[1]; -}PHY_CONDITION_REG_INFO; - -typedef struct _PHY_CONDITION_REG_INFO_V2{ - USHORT usRegIndex; - USHORT usSize; - PHY_CONDITION_REG_VAL_V2 asRegVal[1]; -}PHY_CONDITION_REG_INFO_V2; - -typedef struct _PHY_ANALOG_SETTING_INFO{ - UCHAR ucEncodeMode; - UCHAR ucPhySel; - USHORT usSize; - PHY_CONDITION_REG_INFO asAnalogSetting[1]; -}PHY_ANALOG_SETTING_INFO; - -typedef struct _PHY_ANALOG_SETTING_INFO_V2{ - UCHAR ucEncodeMode; - UCHAR ucPhySel; - USHORT usSize; - PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1]; -}PHY_ANALOG_SETTING_INFO_V2; - - -typedef struct _GFX_HAVESTING_PARAMETERS { - UCHAR ucGfxBlkId; //GFX blk id to be harvested, like CU, RB or PRIM - UCHAR ucReserved; //reserved - UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array - UCHAR ucMaxUnitNumPerSH; //max CU/RB/PRIM number per shader array -} GFX_HAVESTING_PARAMETERS; - -//ucGfxBlkId -#define GFX_HARVESTING_CU_ID 0 -#define GFX_HARVESTING_RB_ID 1 -#define GFX_HARVESTING_PRIM_ID 2 - - -typedef struct _VBIOS_ROM_HEADER{ - UCHAR PciRomSignature[2]; - UCHAR ucPciRomSizeIn512bytes; - UCHAR ucJumpCoreMainInitBIOS; - USHORT usLabelCoreMainInitBIOS; - UCHAR PciReservedSpace[18]; - USHORT usPciDataStructureOffset; - UCHAR Rsvd1d_1a[4]; - char strIbm[3]; - UCHAR CheckSum[14]; - UCHAR ucBiosMsgNumber; - char str761295520[16]; - USHORT usLabelCoreVPOSTNoMode; - USHORT usSpecialPostOffset; - UCHAR ucSpeicalPostImageSizeIn512Bytes; - UCHAR Rsved47_45[3]; - USHORT usROM_HeaderInformationTableOffset; - UCHAR Rsved4f_4a[6]; - char strBuildTimeStamp[20]; - UCHAR ucJumpCoreXFuncFarHandler; - USHORT usCoreXFuncFarHandlerOffset; - UCHAR ucRsved67; - UCHAR ucJumpCoreVFuncFarHandler; - USHORT usCoreVFuncFarHandlerOffset; - UCHAR Rsved6d_6b[3]; - USHORT usATOM_BIOS_MESSAGE_Offset; -}VBIOS_ROM_HEADER; - -/****************************************************************************/ -//Portion VI: Definitinos for vbios MC scratch registers that driver used -/****************************************************************************/ - -#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000 -#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000 -#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000 -#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000 -#define 
MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
-#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
-#define MC_MISC0__MEMORY_TYPE__HBM 0x60000000
-#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
-
-#define ATOM_MEM_TYPE_DDR_STRING "DDR"
-#define ATOM_MEM_TYPE_DDR2_STRING "DDR2"
-#define ATOM_MEM_TYPE_GDDR3_STRING "GDDR3"
-#define ATOM_MEM_TYPE_GDDR4_STRING "GDDR4"
-#define ATOM_MEM_TYPE_GDDR5_STRING "GDDR5"
-#define ATOM_MEM_TYPE_HBM_STRING "HBM"
-#define ATOM_MEM_TYPE_DDR3_STRING "DDR3"
-
-/****************************************************************************/
-//Portion VII: Definitions now obsolete
-/****************************************************************************/
-
-//==========================================================================================
-//Remove the definitions below when driver is ready!
-typedef struct _ATOM_DAC_INFO
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; // in 10kHz unit
- USHORT usReserved;
-}ATOM_DAC_INFO;
-
-
-typedef struct _COMPASSIONATE_DATA
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- //============================== DAC1 portion
- UCHAR ucDAC1_BG_Adjustment;
- UCHAR ucDAC1_DAC_Adjustment;
- USHORT usDAC1_FORCE_Data;
- //============================== DAC2 portion
- UCHAR ucDAC2_CRT2_BG_Adjustment;
- UCHAR ucDAC2_CRT2_DAC_Adjustment;
- USHORT usDAC2_CRT2_FORCE_Data;
- USHORT usDAC2_CRT2_MUX_RegisterIndex;
- UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
- UCHAR ucDAC2_NTSC_BG_Adjustment;
- UCHAR ucDAC2_NTSC_DAC_Adjustment;
- USHORT usDAC2_TV1_FORCE_Data;
- USHORT usDAC2_TV1_MUX_RegisterIndex;
- UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
- UCHAR ucDAC2_CV_BG_Adjustment;
- UCHAR ucDAC2_CV_DAC_Adjustment;
- USHORT usDAC2_CV_FORCE_Data;
- USHORT usDAC2_CV_MUX_RegisterIndex;
- UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
- UCHAR ucDAC2_PAL_BG_Adjustment;
- UCHAR ucDAC2_PAL_DAC_Adjustment;
- USHORT usDAC2_TV2_FORCE_Data;
-}COMPASSIONATE_DATA;
-
-/****************************Supported Device Info Table Definitions**********************/
-// ucConnectInfo:
-// [7:4] - connector type
-// = 1 - VGA connector
-// = 2 - DVI-I
-// = 3 - DVI-D
-// = 4 - DVI-A
-// = 5 - SVIDEO
-// = 6 - COMPOSITE
-// = 7 - LVDS
-// = 8 - DIGITAL LINK
-// = 9 - SCART
-// = 0xA - HDMI_type A
-// = 0xB - HDMI_type B
-// = 0xE - Special case1 (DVI+DIN)
-// Others=TBD
-// [3:0] - DAC Associated
-// = 0 - no DAC
-// = 1 - DACA
-// = 2 - DACB
-// = 3 - External DAC
-// Others=TBD
-//
-
-typedef struct _ATOM_CONNECTOR_INFO
-{
-#if ATOM_BIG_ENDIAN
- UCHAR bfConnectorType:4;
- UCHAR bfAssociatedDAC:4;
-#else
- UCHAR bfAssociatedDAC:4;
- UCHAR bfConnectorType:4;
-#endif
-}ATOM_CONNECTOR_INFO;
-
-typedef union _ATOM_CONNECTOR_INFO_ACCESS
-{
- ATOM_CONNECTOR_INFO sbfAccess;
- UCHAR ucAccess;
-}ATOM_CONNECTOR_INFO_ACCESS;
-
-typedef struct _ATOM_CONNECTOR_INFO_I2C
-{
- ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-}ATOM_CONNECTOR_INFO_I2C;
-
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
-}ATOM_SUPPORTED_DEVICES_INFO;
-
-#define NO_INT_SRC_MAPPED 0xFF
-
-typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
-{
- UCHAR ucIntSrcBitmap;
-}ATOM_CONNECTOR_INC_SRC_BITMAP;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
-
USHORT usDeviceSupport; - ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; - ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; -}ATOM_SUPPORTED_DEVICES_INFO_2; - -typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; - ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; -}ATOM_SUPPORTED_DEVICES_INFO_2d1; - -#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 - - - -typedef struct _ATOM_MISC_CONTROL_INFO -{ - USHORT usFrequency; - UCHAR ucPLL_ChargePump; // PLL charge-pump gain control - UCHAR ucPLL_DutyCycle; // PLL duty cycle control - UCHAR ucPLL_VCO_Gain; // PLL VCO gain control - UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control -}ATOM_MISC_CONTROL_INFO; - - -#define ATOM_MAX_MISC_INFO 4 - -typedef struct _ATOM_TMDS_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMaxFrequency; // in 10kHz - ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; -}ATOM_TMDS_INFO; - - -typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE -{ - UCHAR ucTVStandard; //Same as TV standards defined above, - UCHAR ucPadding[1]; -}ATOM_ENCODER_ANALOG_ATTRIBUTE; - -typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE -{ - UCHAR ucAttribute; //Same as other digital encoder attributes defined above - UCHAR ucPadding[1]; -}ATOM_ENCODER_DIGITAL_ATTRIBUTE; - -typedef union _ATOM_ENCODER_ATTRIBUTE -{ - ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; - ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; -}ATOM_ENCODER_ATTRIBUTE; - - -typedef struct _DVO_ENCODER_CONTROL_PARAMETERS -{ - USHORT usPixelClock; - USHORT usEncoderID; - UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only. - UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT - ATOM_ENCODER_ATTRIBUTE usDevAttr; -}DVO_ENCODER_CONTROL_PARAMETERS; - -typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION -{ - DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion -}DVO_ENCODER_CONTROL_PS_ALLOCATION; - - -#define ATOM_XTMDS_ASIC_SI164_ID 1 -#define ATOM_XTMDS_ASIC_SI178_ID 2 -#define ATOM_XTMDS_ASIC_TFP513_ID 3 -#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001 -#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 -#define ATOM_XTMDS_MVPU_FPGA 0x00000004 - - -typedef struct _ATOM_XTMDS_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usSingleLinkMaxFrequency; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Points to the I2C ID used to control the external chip - UCHAR ucXtransimitterID; - UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported - UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program sequence alters - // due to design. This ID is used to alert driver that the sequence is not "standard"! - UCHAR ucMasterAddress; // Address to control Master xTMDS Chip - UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip -}ATOM_XTMDS_INFO; - -typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS -{ - UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off - UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX.... 
- UCHAR ucPadding[2]; -}DFP_DPMS_STATUS_CHANGE_PARAMETERS; - -/****************************Legacy Power Play Table Definitions **********************/ - -//Definitions for ulPowerPlayMiscInfo -#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L -#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L -#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L - -#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L -#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L - -#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L - -#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L -#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L -#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program - -#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L -#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L -#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L -#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L -#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L -#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L -#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L - -#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L -#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L -#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L -#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L -#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L - -#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved -#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 - -#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L -#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L -#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L -#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic -#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic -#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, this mode is for accelerated 3D mode - -#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) -#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 -#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L - -#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L -#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L -#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L -#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L -#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L -#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L -#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pick the one with the minimum power consumption. 
- //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback -#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L -#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L -#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L - -//ucTableFormatRevision=1 -//ucTableContentRevision=1 -typedef struct _ATOM_POWERMODE_INFO -{ - ULONG ulMiscInfo; //The power level should be arranged in ascending order - ULONG ulReserved1; // must set to 0 - ULONG ulReserved2; // must set to 0 - USHORT usEngineClock; - USHORT usMemoryClock; - UCHAR ucVoltageDropIndex; // index to GPIO table - UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; // number of PCIE lanes -}ATOM_POWERMODE_INFO; - -//ucTableFormatRevision=2 -//ucTableContentRevision=1 -typedef struct _ATOM_POWERMODE_INFO_V2 -{ - ULONG ulMiscInfo; //The power level should be arranged in ascending order - ULONG ulMiscInfo2; - ULONG ulEngineClock; - ULONG ulMemoryClock; - UCHAR ucVoltageDropIndex; // index to GPIO table - UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; // number of PCIE lanes -}ATOM_POWERMODE_INFO_V2; - -//ucTableFormatRevision=2 -//ucTableContentRevision=2 -typedef struct _ATOM_POWERMODE_INFO_V3 -{ - ULONG ulMiscInfo; //The power level should be arranged in ascending order - ULONG ulMiscInfo2; - ULONG ulEngineClock; - ULONG ulMemoryClock; - UCHAR ucVoltageDropIndex; // index to Core (VDDC) voltage table - UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; // number of PCIE lanes - UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI voltage table -}ATOM_POWERMODE_INFO_V3; - - -#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 - -#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01 -#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02 - -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog - - -typedef struct _ATOM_POWERPLAY_INFO -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -}ATOM_POWERPLAY_INFO; - -typedef struct _ATOM_POWERPLAY_INFO_V2 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -}ATOM_POWERPLAY_INFO_V2; - -typedef struct _ATOM_POWERPLAY_INFO_V3 -{ - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -}ATOM_POWERPLAY_INFO_V3; - - - 
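/*
 * Illustrative sketch (an addition for clarity, not code from this patch):
 * the legacy PowerPlay tables above are walked using ucSizeOfPowerModeEntry
 * and ucNumOfPowerModeEntries rather than sizeof(), since the per-mode
 * entry size may differ between VBIOS revisions. The helper name and the
 * pr_info() reporting are assumptions made for the example; clocks are
 * taken to be in 10 kHz units, as elsewhere in this header.
 */
static void atom_dump_legacy_powerplay(const ATOM_POWERPLAY_INFO *info)
{
	const UCHAR *entry = (const UCHAR *)info->asPowerPlayInfo;
	UCHAR n = info->ucNumOfPowerModeEntries;
	unsigned int i;

	if (n > ATOM_MAX_NUMBEROF_POWER_BLOCK)	/* clamp to the array bound */
		n = ATOM_MAX_NUMBEROF_POWER_BLOCK;

	for (i = 0; i < n; i++) {
		const ATOM_POWERMODE_INFO *mode =
				(const ATOM_POWERMODE_INFO *)entry;

		pr_info("power mode %u: sclk %u kHz, mclk %u kHz\n",
			i, 10u * mode->usEngineClock, 10u * mode->usMemoryClock);
		entry += info->ucSizeOfPowerModeEntry;	/* stride by entry size */
	}
}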
-/**************************************************************************/ - - -// Following definitions are for compatibility issues in different SW components. -#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 -#define Object_Info Object_Header -#define AdjustARB_SEQ MC_InitParameter -#define VRAM_GPIO_DetectionInfo VoltageObjectInfo -#define ASIC_VDDCI_Info ASIC_ProfilingInfo -#define ASIC_MVDDQ_Info MemoryTrainingInfo -#define SS_Info PPLL_SS_Info -#define ASIC_MVDDC_Info ASIC_InternalSS_Info -#define DispDevicePriorityInfo SaveRestoreInfo -#define DispOutInfo TV_VideoMode - - -#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE -#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE - -//New device naming, remove them when both DAL/VBIOS is ready -#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS -#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS - -#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS -#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS - -#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS -#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION - -#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT -#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT - -#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX -#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX - -#define ATOM_DEVICE_DFP2I_INDEX 0x00000009 -#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) - -#define ATOM_S0_DFP1I ATOM_S0_DFP1 -#define ATOM_S0_DFP1X ATOM_S0_DFP2 - -#define ATOM_S0_DFP2I 0x00200000L -#define ATOM_S0_DFP2Ib2 0x20 - -#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE -#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE - -#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L -#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02 - -#define ATOM_S3_DFP2I_ACTIVEb1 0x02 - -#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE -#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE - -#define ATOM_S3_DFP2I_ACTIVE 0x00000200L - -#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE -#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE -#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L - - -#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02 -#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02 - -#define ATOM_S5_DOS_REQ_DFP2I 0x0200 -#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1 -#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2 - -#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 -#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L - -#define TMDS1XEncoderControl DVOEncoderControl -#define DFP1XOutputControl DVOOutputControl - -#define ExternalDFPOutputControl DFP1XOutputControl -#define EnableExternalTMDS_Encoder TMDS1XEncoderControl - -#define DFP1IOutputControl TMDSAOutputControl -#define DFP2IOutputControl LVTMAOutputControl - -#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS -#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION - -#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS -#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION - -#define ucDac1Standard ucDacStandard -#define ucDac2Standard ucDacStandard - -#define TMDS1EncoderControl TMDSAEncoderControl -#define TMDS2EncoderControl LVTMAEncoderControl - -#define DFP1OutputControl TMDSAOutputControl -#define DFP2OutputControl LVTMAOutputControl -#define CRT1OutputControl DAC1OutputControl -#define CRT2OutputControl DAC2OutputControl - -//These 
two lines will be removed for sure in a few days, will follow up with Michael V. -#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL -#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL - -#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L -#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE -#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE -#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE -#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE - -#define ATOM_S6_ACC_REQ_TV2 0x00400000L -#define ATOM_DEVICE_TV2_INDEX 0x00000006 -#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) -#define ATOM_S0_TV2 0x00100000L -#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE -#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE - -/*********************************************************************************/ - -#pragma pack() // BIOS data must use byte alignment - -#pragma pack(1) - -typedef struct _ATOM_HOLE_INFO -{ - USHORT usOffset; // offset of the hole ( from the start of the binary ) - USHORT usLength; // length of the hole ( in bytes ) -}ATOM_HOLE_INFO; - -typedef struct _ATOM_SERVICE_DESCRIPTION -{ - UCHAR ucRevision; // Holes set revision - UCHAR ucAlgorithm; // Hash algorithm - UCHAR ucSignatureType; // Signature type ( 0 - no signature, 1 - test, 2 - production ) - UCHAR ucReserved; - USHORT usSigOffset; // Signature offset ( from the start of the binary ) - USHORT usSigLength; // Signature length -}ATOM_SERVICE_DESCRIPTION; - - -typedef struct _ATOM_SERVICE_INFO -{ - ATOM_COMMON_TABLE_HEADER asHeader; - ATOM_SERVICE_DESCRIPTION asDescr; - UCHAR ucholesNo; // number of holes that follow - ATOM_HOLE_INFO holes[1]; // array of hole descriptions -}ATOM_SERVICE_INFO; - - - -#pragma pack() // BIOS data must use byte alignment - -// -// AMD ACPI Table -// -#pragma pack(1) - -typedef struct { - ULONG Signature; - ULONG TableLength; //Length - UCHAR Revision; - UCHAR Checksum; - UCHAR OemId[6]; - UCHAR OemTableId[8]; //UINT64 OemTableId; - ULONG OemRevision; - ULONG CreatorId; - ULONG CreatorRevision; -} AMD_ACPI_DESCRIPTION_HEADER; -/* -//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h -typedef struct { - UINT32 Signature; //0x0 - UINT32 Length; //0x4 - UINT8 Revision; //0x8 - UINT8 Checksum; //0x9 - UINT8 OemId[6]; //0xA - UINT64 OemTableId; //0x10 - UINT32 OemRevision; //0x18 - UINT32 CreatorId; //0x1C - UINT32 CreatorRevision; //0x20 -}EFI_ACPI_DESCRIPTION_HEADER; -*/ -typedef struct { - AMD_ACPI_DESCRIPTION_HEADER SHeader; - UCHAR TableUUID[16]; //0x24 - ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure. - ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure. 
- ULONG Reserved[4]; //0x3C -}UEFI_ACPI_VFCT; - -typedef struct { - ULONG PCIBus; //0x4C - ULONG PCIDevice; //0x50 - ULONG PCIFunction; //0x54 - USHORT VendorID; //0x58 - USHORT DeviceID; //0x5A - USHORT SSVID; //0x5C - USHORT SSID; //0x5E - ULONG Revision; //0x60 - ULONG ImageLength; //0x64 -}VFCT_IMAGE_HEADER; - - -typedef struct { - VFCT_IMAGE_HEADER VbiosHeader; - UCHAR VbiosContent[1]; -}GOP_VBIOS_CONTENT; - -typedef struct { - VFCT_IMAGE_HEADER Lib1Header; - UCHAR Lib1Content[1]; -}GOP_LIB1_CONTENT; - -#pragma pack() - - -#endif /* _ATOMBIOS_H */ - -#include "pptable.h" - diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index e60557417..1e0bba29e 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -812,7 +812,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; - if ((adev->flags & AMDGPU_IS_APU) && + if ((adev->flags & AMD_IS_APU) && (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { if (is_dp || !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) { diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 519fa515c..484710cfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -64,6 +64,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "amdgpu_amdkfd.h" + /* * Indirect registers accessor */ @@ -836,7 +838,7 @@ static u32 cik_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK) return reference_clock / 2; } else { @@ -1233,7 +1235,7 @@ static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) if (reset_mask & AMDGPU_RESET_VMC) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { if (reset_mask & AMDGPU_RESET_MC) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; } @@ -1409,7 +1411,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timed out !\n"); } - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); /* disable BM */ @@ -1427,7 +1429,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) } /* does asic init need to be run first??? 
*/ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); } @@ -1571,7 +1573,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) if (amdgpu_pcie_gen2 == 0) return; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); @@ -1731,7 +1733,7 @@ static void cik_program_aspm(struct amdgpu_device *adev) return; /* XXX double check APUs */ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); @@ -2451,14 +2453,21 @@ static int cik_common_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_amdkfd_suspend(adev); + return cik_common_hw_fini(adev); } static int cik_common_resume(void *handle) { + int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return cik_common_hw_init(adev); + r = cik_common_hw_init(adev); + if (r) + return r; + + return amdgpu_amdkfd_resume(adev); } static bool cik_common_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 335318051..f7ab0aadf 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -179,6 +179,19 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); } +static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->nop | + SDMA_NOP_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->nop); +} + /** * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine * @@ -204,8 +217,8 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ - while ((ring->wptr & 7) != 4) - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); + cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff); @@ -492,6 +505,8 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev) fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); + if (adev->sdma[i].feature_version >= 20) + adev->sdma[i].burst_nop = true; fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); @@ -605,6 +620,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -620,12 +636,11 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - + memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = 
SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); @@ -634,20 +649,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -657,12 +668,17 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } + +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -805,8 +821,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, */ static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) { - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); + u32 pad_count; + int i; + + pad_count = (8 - (ib->length_dw & 0x7)) % 8; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) | + SDMA_NOP_COUNT(pad_count - 1); + else + ib->ptr[ib->length_dw++] = + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); } /** @@ -1293,6 +1320,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { .test_ring = cik_sdma_ring_test_ring, .test_ib = cik_sdma_ring_test_ib, .is_lockup = cik_sdma_ring_is_lockup, + .insert_nop = cik_sdma_ring_insert_nop, }; static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) @@ -1329,18 +1357,18 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. */ -static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring, +static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)); - amdgpu_ring_write(ring, byte_count); - amdgpu_ring_write(ring, 0); /* src/dst endian swap */ - amdgpu_ring_write(ring, lower_32_bits(src_offset)); - amdgpu_ring_write(ring, upper_32_bits(src_offset)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = byte_count; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); } /** @@ -1353,16 +1381,16 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring, * * Fill GPU buffers using the DMA engine (CIK). 
*/ -static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring, +static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); - amdgpu_ring_write(ring, src_data); - amdgpu_ring_write(ring, byte_count); + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = byte_count; } static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = { @@ -1395,5 +1423,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index d19085a97..7f6d457f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -487,6 +487,7 @@ (((op) & 0xFF) << 0)) /* sDMA opcodes */ #define SDMA_OPCODE_NOP 0 +# define SDMA_NOP_COUNT(x) (((x) & 0x3FFF) << 16) #define SDMA_OPCODE_COPY 1 # define SDMA_COPY_SUB_OPCODE_LINEAR 0 # define SDMA_COPY_SUB_OPCODE_TILED 1 @@ -552,6 +553,12 @@ #define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_SEMAPHORE 0x00000006 +/* if PTR32, these are the bases for scratch and lds */ +#define PRIVATE_BASE(x) ((x) << 0) /* scratch */ +#define SHARED_BASE(x) ((x) << 16) /* LDS */ + +#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200 + /* valid for both DEFAULT_MTYPE and APE1_MTYPE */ enum { MTYPE_CACHED = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index fd29c18fc..2e3373ed4 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1598,9 +1598,9 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) if (pi->sys_info.nb_dpm_enable) { if (ps->force_high) - cz_dpm_nbdpm_lm_pstate_enable(adev, true); - else cz_dpm_nbdpm_lm_pstate_enable(adev, false); + else + cz_dpm_nbdpm_lm_pstate_enable(adev, true); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index a72ffc7d6..e33180d33 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev) * 3. 
map kernel virtual address */ ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + toc_buf); if (ret) { dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); @@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev) } ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, + smu_buf); if (ret) { dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index ef36467c7..d4c82b625 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -126,9 +126,31 @@ static const u32 tonga_mgcg_cgcg_init[] = mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, }; +static const u32 golden_settings_fiji_a10[] = +{ + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x1f311fff, 0x12300000, + mmHDMI_CONTROL, 0x31000111, 0x00000011, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, +}; + static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -821,11 +843,11 @@ static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } } else { mem_cfg = 1; @@ -2907,6 +2929,7 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_irq_funcs(adev); switch (adev->asic_type) { + case CHIP_FIJI: case CHIP_TONGA: adev->mode_info.num_crtc = 6; /* XXX 7??? */ adev->mode_info.num_hpd = 6; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 329bca0f1..7e1cf5e4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -819,11 +819,11 @@ static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); mem_cfg = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 
2 : 4; } } else { mem_cfg = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 937879ed8..34b9c2a9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -788,11 +788,11 @@ static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev, buffer_alloc = 2; } else if (mode->crtc_hdisplay < 4096) { tmp = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } else { DRM_DEBUG_KMS("Mode too big for LB!\n"); tmp = 0; - buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; } } else { tmp = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c new file mode 100644 index 000000000..56c2eea09 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -0,0 +1,181 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "fiji_smumgr.h" + +/*(DEBLOBBED)*/ + +static void fiji_dpm_set_funcs(struct amdgpu_device *adev); + +static int fiji_dpm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_set_funcs(adev); + + return 0; +} + +static int fiji_dpm_init_microcode(struct amdgpu_device *adev) +{ + char fw_name[30] = "/*(DEBLOBBED)*/"; + int err; + + err = reject_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +static int fiji_dpm_sw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ret = fiji_dpm_init_microcode(adev); + if (ret) + return ret; + + return 0; +} + +static int fiji_dpm_sw_fini(void *handle) +{ + return 0; +} + +static int fiji_dpm_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + mutex_lock(&adev->pm.mutex); + + ret = fiji_smu_init(adev); + if (ret) { + DRM_ERROR("SMU initialization failed\n"); + goto fail; + } + + ret = fiji_smu_start(adev); + if (ret) { + DRM_ERROR("SMU start failed\n"); + goto fail; + } + + mutex_unlock(&adev->pm.mutex); + return 0; + +fail: + adev->firmware.smu_load = false; + mutex_unlock(&adev->pm.mutex); + return -EINVAL; +} + +static int fiji_dpm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); + fiji_smu_fini(adev); + mutex_unlock(&adev->pm.mutex); + return 0; +} + +static int fiji_dpm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_hw_fini(adev); + + return 0; +} + +static int fiji_dpm_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + fiji_dpm_hw_init(adev); + + return 0; +} + +static int fiji_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int fiji_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs fiji_dpm_ip_funcs = { + .early_init = fiji_dpm_early_init, + .late_init = NULL, + .sw_init = fiji_dpm_sw_init, + .sw_fini = fiji_dpm_sw_fini, + .hw_init = fiji_dpm_hw_init, + .hw_fini = fiji_dpm_hw_fini, + .suspend = fiji_dpm_suspend, + .resume = fiji_dpm_resume, + .is_idle = NULL, + .wait_for_idle = NULL, + .soft_reset = NULL, + .print_status = NULL, + .set_clockgating_state = fiji_dpm_set_clockgating_state, + .set_powergating_state = fiji_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs fiji_dpm_funcs = { + .get_temperature = NULL, + .pre_set_power_state = NULL, + .set_power_state = NULL, + .post_set_power_state = NULL, + .display_configuration_changed = NULL, + .get_sclk = NULL, + .get_mclk = NULL, + .print_power_state = NULL, + .debugfs_print_current_performance_level = NULL, + .force_performance_level = NULL, + .vblank_too_short = NULL, + .powergate_uvd = NULL, +}; + +static void fiji_dpm_set_funcs(struct amdgpu_device *adev) +{ + if (NULL == adev->pm.funcs) + adev->pm.funcs = &fiji_dpm_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h new file mode 100644 index 000000000..3c4824082 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h @@ -0,0 +1,182 @@ +/* + * 
Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef FIJI_PP_SMC_H +#define FIJI_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +//Gemini Modes +#define PPSMC_GeminiModeNone 0 //Single GPU board +#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board +#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) 
+#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) +#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) +#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +#define PPSMC_MSG_Test ((uint16_t)0x100) +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) +#define 
PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) + +typedef uint16_t PPSMC_Msg; + +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 +#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c new file mode 100644 index 000000000..bda1249eb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -0,0 +1,857 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "fiji_ppsmc.h" +#include "fiji_smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "amdgpu_ucode.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + +#define FIJI_SMC_SIZE 0x20000 + +static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) +{ + uint32_t val; + + if (smc_address & 3) + return -EINVAL; + + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + return 0; +} + +static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + unsigned long flags; + + if (smc_start_address & 3) + return -EINVAL; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = fiji_set_smc_sram_address(adev, addr, limit); + + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = fiji_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + orig_data = RREG32(mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = fiji_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + } + +out: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int fiji_program_jump_on_start(struct amdgpu_device *adev) +{ + static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; + fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +static bool fiji_is_smc_ram_running(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); + + return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); +} + +static int wait_smu_response(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32(mmSMC_RESP_0); + if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} + +static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, 0x20000); + WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send message\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) +{ + if (!fiji_is_smc_ram_running(adev)) + { + return -EINVAL; + } + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + 
WREG32(mmSMC_MESSAGE_0, msg); + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send message\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, + PPSMC_Msg msg) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MESSAGE_0, msg); + + return 0; +} + +static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, + uint32_t parameter) +{ + if (!fiji_is_smc_ram_running(adev)) + return -EINVAL; + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return fiji_send_msg_to_smc(adev, msg); +} + +static int fiji_send_msg_to_smc_with_parameter_without_waiting( + struct amdgpu_device *adev, + PPSMC_Msg msg, uint32_t parameter) +{ + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return fiji_send_msg_to_smc_without_waiting(adev, msg); +} + +#if 0 /* not used yet */ +static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + if (!fiji_is_smc_ram_running(adev)) + return -EINVAL; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} +#endif + +static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) +{ + const struct smc_firmware_header_v1_0 *hdr; + uint32_t ucode_size; + uint32_t ucode_start_address; + const uint8_t *src; + uint32_t val; + uint32_t byte_count; + uint32_t *data; + unsigned long flags; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + src = (const uint8_t *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + if (ucode_size & 3) { + DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (ucode_size > FIJI_SMC_SIZE) { + DRM_ERROR("SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + byte_count = ucode_size; + data = (uint32_t *)src; + for (; byte_count >= 4; data++, byte_count -= 4) + WREG32(mmSMC_IND_DATA_0, data[0]); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +#if 0 /* not used yet */ +static int fiji_read_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t *value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = fiji_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + *value = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + 
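/*
 * Illustrative note (an addition for clarity, not code from this patch):
 * fiji_read_smc_sram_dword() above and fiji_write_smc_sram_dword() below
 * share one indirect-access pattern: take smc_idx_lock, let
 * fiji_set_smc_sram_address() validate the dword-aligned, in-limit address
 * and program mmSMC_IND_INDEX_0, then move a single dword through
 * mmSMC_IND_DATA_0. A hypothetical caller (assuming these #if 0 helpers
 * were compiled in) could read back the jump instruction that
 * fiji_program_jump_on_start() places at SRAM offset 0:
 *
 *	uint32_t insn;
 *
 *	if (!fiji_read_smc_sram_dword(adev, 0x0, &insn, FIJI_SMC_SIZE))
 *		DRM_INFO("SMC reset vector: 0x%08x\n", insn);
 */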
+static int fiji_write_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = fiji_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + WREG32(mmSMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int fiji_smu_stop_smc(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + return 0; +} +#endif + +static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case UCODE_ID_SDMA0: + return AMDGPU_UCODE_ID_SDMA0; + case UCODE_ID_SDMA1: + return AMDGPU_UCODE_ID_SDMA1; + case UCODE_ID_CP_CE: + return AMDGPU_UCODE_ID_CP_CE; + case UCODE_ID_CP_PFP: + return AMDGPU_UCODE_ID_CP_PFP; + case UCODE_ID_CP_ME: + return AMDGPU_UCODE_ID_CP_ME; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + return AMDGPU_UCODE_ID_CP_MEC1; + case UCODE_ID_RLC_G: + return AMDGPU_UCODE_ID_RLC_G; + default: + DRM_ERROR("ucode type is out of range!\n"); + return AMDGPU_UCODE_ID_MAXIMUM; + } +} + +static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type); + struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; + const struct gfx_firmware_header_v1_0 *header = NULL; + uint64_t gpu_addr; + uint32_t data_size; + + if (ucode->fw == NULL) + return -EINVAL; + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((fw_type == UCODE_ID_CP_MEC_JT1) || + (fw_type == UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + + entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + entry->id = (uint16_t)fw_type; + entry->image_addr_high = upper_32_bits(gpu_addr); + entry->image_addr_low = lower_32_bits(gpu_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = data_size; + entry->num_register_entries = 0; + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + + return 0; +} + +static int fiji_smu_request_load_fw(struct amdgpu_device *adev) +{ + struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv; + struct SMU_DRAMData_TOC *toc; + uint32_t fw_to_load; + + WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); + + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); + + toc = (struct SMU_DRAMData_TOC *)private->header; + toc->num_entries = 0; + toc->structure_version = 1; + + if (!adev->firmware.smu_load) + return 0; + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for RLC\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, + 
&toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for CE\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for PFP\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for ME\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA0\n"); + return -EINVAL; + } + + if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA1\n"); + return -EINVAL; + } + + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); + fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_MASK; + + if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { + DRM_ERROR("Fail to request SMU load ucode\n"); + return -EINVAL; + } + + return 0; +} + +static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case AMDGPU_UCODE_ID_SDMA0: + return UCODE_ID_SDMA0_MASK; + case AMDGPU_UCODE_ID_SDMA1: + return UCODE_ID_SDMA1_MASK; + case AMDGPU_UCODE_ID_CP_CE: + return UCODE_ID_CP_CE_MASK; + case AMDGPU_UCODE_ID_CP_PFP: + return UCODE_ID_CP_PFP_MASK; + case AMDGPU_UCODE_ID_CP_ME: + return UCODE_ID_CP_ME_MASK; + case AMDGPU_UCODE_ID_CP_MEC1: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_CP_MEC2: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_RLC_G: + return UCODE_ID_RLC_G_MASK; + default: + DRM_ERROR("ucode type is out of range!\n"); + return 0; + } +} + +static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev, + uint32_t fw_type) +{ + uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type); + int i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + int i; + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = fiji_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Clear status */ + WREG32_SMC(ixSMU_STATUS, 0); + + /* Enable clock */ + val = 
RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Set SMU Auto Start */ + val = RREG32_SMC(ixSMU_INPUT_DATA); + val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); + WREG32_SMC(ixSMU_INPUT_DATA, val); + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Interrupt is not enabled by firmware\n"); + return -EINVAL; + } + + /* Call Test SMU message with 0x20000 offset + * to trigger SMU start + */ + fiji_send_msg_to_smc_offset(adev); + DRM_INFO("[FM]try trigger smu start\n"); + /* Wait for done bit to be set */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMU_STATUS); + if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMU start\n"); + return -EINVAL; + } + + /* Check pass/failed indicator */ + val = RREG32_SMC(ixSMU_STATUS); + if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { + DRM_ERROR("SMU Firmware start failed\n"); + return -EINVAL; + } + DRM_INFO("[FM]smu started\n"); + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMU firmware initialization failed\n"); + return -EINVAL; + } + DRM_INFO("[FM]smu initialized\n"); + + return 0; +} + +static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev) +{ + int i, result; + uint32_t val; + + /* wait for smc boot up */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); + if (val) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMC boot sequence is not completed\n"); + return -EINVAL; + } + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = fiji_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Set SMC instruction start point at 0x0 */ + fiji_program_jump_on_start(adev); + + /* Enable clock */ + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMC firmware initialization\n"); + return -EINVAL; + } + + return 0; +} + +int fiji_smu_start(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + + if 
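Both start paths are built almost entirely from REG_GET_FIELD()/REG_SET_FIELD() read-modify-write cycles on SMC_SYSCON_RESET_CNTL and SMC_SYSCON_CLOCK_CNTL_0. The macros reduce to mask-and-shift arithmetic; a sketch with made-up field positions (the real masks and shifts come from the generated register headers, so rst_reg at bit 0 and ck_disable at bit 1 are assumptions for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout: rst_reg = bit 0, ck_disable = bit 1. */
    #define RST_REG_MASK     0x00000001u
    #define RST_REG_SHIFT    0
    #define CK_DISABLE_MASK  0x00000002u
    #define CK_DISABLE_SHIFT 1

    #define SET_FIELD(reg, mask, shift, v) \
        (((reg) & ~(mask)) | (((uint32_t)(v) << (shift)) & (mask)))
    #define GET_FIELD(reg, mask, shift) (((reg) & (mask)) >> (shift))

    int main(void)
    {
        uint32_t val = 0;

        /* Assert reset, then gate the clock -- the fiji_smu_stop_smc() order. */
        val = SET_FIELD(val, RST_REG_MASK, RST_REG_SHIFT, 1);
        val = SET_FIELD(val, CK_DISABLE_MASK, CK_DISABLE_SHIFT, 1);
        printf("cntl=0x%08X rst=%u ck_disable=%u\n", (unsigned)val,
               (unsigned)GET_FIELD(val, RST_REG_MASK, RST_REG_SHIFT),
               (unsigned)GET_FIELD(val, CK_DISABLE_MASK, CK_DISABLE_SHIFT));
        return 0;
    }
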
(!fiji_is_smc_ram_running(adev)) { + val = RREG32_SMC(ixSMU_FIRMWARE); + if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { + DRM_INFO("[FM]start smu in nonprotection mode\n"); + result = fiji_smu_start_in_non_protection_mode(adev); + if (result) + return result; + } else { + DRM_INFO("[FM]start smu in protection mode\n"); + result = fiji_smu_start_in_protection_mode(adev); + if (result) + return result; + } + } + + return fiji_smu_request_load_fw(adev); +} + +static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = { + .check_fw_load_finish = fiji_smu_check_fw_load_finish, + .request_smu_load_fw = NULL, + .request_smu_specific_fw = NULL, +}; + +int fiji_smu_init(struct amdgpu_device *adev) +{ + struct fiji_smu_private_data *private; + uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + uint32_t smu_internal_buffer_size = 200*4096; + struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; + struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; + uint64_t mc_addr; + void *toc_buf_ptr; + void *smu_buf_ptr; + int ret; + + private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL); + if (NULL == private) + return -ENOMEM; + + /* allocate firmware buffers */ + if (adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + adev->smu.priv = private; + adev->smu.fw_flags = 0; + + /* Allocate FW image data structure and header buffer */ + ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, toc_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for TOC buffer\n"); + return -ENOMEM; + } + + /* Allocate buffer for SMU internal buffer */ + ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, smu_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); + return -ENOMEM; + } + + /* Retrieve GPU address for header buffer and internal buffer */ + ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the TOC buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.toc_buf); + private->header_addr_low = lower_32_bits(mc_addr); + private->header_addr_high = upper_32_bits(mc_addr); + private->header = toc_buf_ptr; + + ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map 
the SMU internal buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.smu_buf); + private->smu_buffer_addr_low = lower_32_bits(mc_addr); + private->smu_buffer_addr_high = upper_32_bits(mc_addr); + + adev->smu.smumgr_funcs = &fiji_smumgr_funcs; + + return 0; +} + +int fiji_smu_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + kfree(adev->smu.priv); + adev->smu.priv = NULL; + if (adev->firmware.fw_buf) + amdgpu_ucode_fini_bo(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h new file mode 100644 index 000000000..1cef03dee --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h @@ -0,0 +1,42 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef FIJI_SMUMGR_H +#define FIJI_SMUMGR_H + +#include "fiji_ppsmc.h" + +int fiji_smu_init(struct amdgpu_device *adev); +int fiji_smu_fini(struct amdgpu_device *adev); +int fiji_smu_start(struct amdgpu_device *adev); + +struct fiji_smu_private_data +{ + uint8_t *header; + uint32_t smu_buffer_addr_high; + uint32_t smu_buffer_addr_low; + uint32_t header_addr_high; + uint32_t header_addr_low; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index b6c9618ad..042839813 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2144,7 +2144,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* Get memory bank mapping mode. 
*/ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); @@ -2619,6 +2619,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; unsigned i; @@ -2630,29 +2631,27 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); + memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; + goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err2; + + r = fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - return r; + goto err2; } for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -2662,14 +2661,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err2; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } - amdgpu_gfx_scratch_free(adev, scratch); + +err2: + fence_put(f); amdgpu_ib_free(adev, &ib); +err1: + amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -3173,7 +3177,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -3340,7 +3344,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, sizeof(struct bonaire_mqd), PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); @@ -3577,41 +3581,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - - /* let CE wait till condition satisfied */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, 
gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); - amdgpu_ring_write(ring, 0xffffffff); - amdgpu_ring_write(ring, 4); /* poll interval */ - - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - /* * vm * VMID 0 is the physical GPU addresses as used by the kernel. @@ -3630,6 +3599,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* sync CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | @@ -3670,7 +3646,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0x0); /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v7_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } @@ -3729,7 +3708,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) int r; /* allocate rlc buffers */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { if (adev->asic_type == CHIP_KAVERI) { adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; adev->gfx.rlc.reg_list_size = @@ -3753,7 +3732,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) /* save restore block */ if (adev->gfx.rlc.save_restore_obj == NULL) { r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.save_restore_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); return r; @@ -3794,7 +3776,10 @@ if (adev->gfx.rlc.clear_state_obj == NULL) { r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.clear_state_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -3831,7 +3816,10 @@ if (adev->gfx.rlc.cp_table_size) { if (adev->gfx.rlc.cp_table_obj == NULL) { r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj); + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); if (r) { dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); @@ -4763,12 +4751,6 @@ static int gfx_v7_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc 
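The buffer setup in fiji_smu_init() further up and the rlc_init() hunks here share one allocation recipe: amdgpu_bo_create(), amdgpu_bo_reserve(), amdgpu_bo_pin() (which returns the GPU address), amdgpu_bo_kmap() for a CPU pointer, then amdgpu_bo_unreserve(), with every failure unwinding exactly the steps already taken. The control flow abstracted into a sketch; create()/reserve()/pin()/kmap() are stubs, not the TTM API:

    #include <stdint.h>
    #include <stdlib.h>

    struct buf { void *cpu; uint64_t gpu; };

    /* Stubs standing in for amdgpu_bo_create/reserve/pin/kmap/unreserve. */
    static int  create(struct buf *b)    { b->cpu = malloc(4096); return b->cpu ? 0 : -1; }
    static int  reserve(struct buf *b)   { (void)b; return 0; }
    static int  pin(struct buf *b)       { b->gpu = (uintptr_t)b->cpu; return 0; }
    static int  kmap(struct buf *b)      { (void)b; return 0; }
    static void unreserve(struct buf *b) { (void)b; }
    static void destroy(struct buf *b)   { free(b->cpu); b->cpu = NULL; }

    /* Each error path releases exactly what the earlier steps acquired --
     * the same unwind discipline as the TOC/SMU buffer setup above. */
    static int setup(struct buf *b)
    {
        if (create(b))
            return -1;
        if (reserve(b))
            goto err_buf;
        if (pin(b) || kmap(b))
            goto err_reserved;
        unreserve(b);
        return 0;

    err_reserved:
        unreserve(b);
    err_buf:
        destroy(b);
        return -1;
    }

    int main(void)
    {
        struct buf b;
        return setup(&b);
    }
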
failed\n", r); - return r; - } - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; ring->ring_obj = NULL; @@ -4812,21 +4794,21 @@ static int gfx_v7_0_sw_init(void *handle) r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, &adev->gds.gds_gfx_bo); + NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, &adev->gds.gws_gfx_bo); + NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_OA, 0, - NULL, &adev->gds.oa_gfx_bo); + NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -4847,8 +4829,6 @@ static int gfx_v7_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); @@ -5565,6 +5545,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .is_lockup = gfx_v7_0_ring_is_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { @@ -5581,6 +5562,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .test_ring = gfx_v7_0_ring_test_ring, .test_ib = gfx_v7_0_ring_test_ib, .is_lockup = gfx_v7_0_ring_is_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d90eb50a8..e6117915b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -198,6 +198,71 @@ static const u32 tonga_mgcg_cgcg_init[] = mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; +static const u32 fiji_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e, + mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF +}; + +static const u32 golden_settings_fiji_a10[] = +{ + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf30fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + 
mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, +}; + static const u32 golden_settings_iceland_a11[] = { mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, @@ -420,6 +485,18 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) iceland_golden_common_all, (const u32)ARRAY_SIZE(iceland_golden_common_all)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_program_register_sequence(adev, + fiji_golden_common_all, + (const u32)ARRAY_SIZE(fiji_golden_common_all)); + break; + case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -507,6 +584,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; unsigned i; @@ -518,29 +596,27 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); + memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; + goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err2; + + r = fence_wait(f, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - amdgpu_ib_free(adev, &ib); - return r; + goto err2; } for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -550,14 +626,18 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err2; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } - amdgpu_gfx_scratch_free(adev, scratch); 
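The Fiji tables just added are flat (offset, and_mask, or_value) triples consumed by amdgpu_program_register_sequence() in the golden-register switch above. A plausible reading of that helper as a standalone sketch, assuming the radeon-era semantics where a full-ones mask means "overwrite" and anything else is a masked read-modify-write:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mmio[0x10000];
    static uint32_t rreg(uint32_t r)             { return mmio[r]; }
    static void     wreg(uint32_t r, uint32_t v) { mmio[r] = v; }

    /* Walk (offset, and_mask, or_value) triples. */
    static void program_register_sequence(const uint32_t *regs, unsigned int n)
    {
        unsigned int i;
        uint32_t tmp;

        for (i = 0; i < n; i += 3) {
            if (regs[i + 1] == 0xffffffff) {
                tmp = regs[i + 2];      /* full mask: plain overwrite */
            } else {
                tmp = rreg(regs[i]);    /* else: RMW on the masked bits */
                tmp &= ~regs[i + 1];
                tmp |= regs[i + 2];
            }
            wreg(regs[i], tmp);
        }
    }

    int main(void)
    {
        /* Shape of a golden_settings_fiji_a10 entry: replace bits 0..8,
         * keep the rest (register offset 0x2440 is arbitrary here). */
        static const uint32_t table[] = { 0x2440, 0x000001ff, 0x00000040 };

        mmio[0x2440] = 0xffffffff;
        program_register_sequence(table, 3);
        printf("0x2440 = 0x%08X\n", (unsigned)mmio[0x2440]);
        return 0;
    }
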
+err2: + fence_put(f); amdgpu_ib_free(adev, &ib); +err1: + amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -582,6 +662,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_CARRIZO: chip_name = "carrizo"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; default: BUG(); } @@ -759,7 +842,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, &adev->gfx.mec.hpd_eop_obj); if (r) { dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -831,12 +914,6 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); - if (r) { - DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); - return r; - } - /* set up the gfx ring */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -886,21 +963,21 @@ static int gfx_v8_0_sw_init(void *handle) /* reserve GDS, GWS and OA resource for gfx */ r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, + AMDGPU_GEM_DOMAIN_GDS, 0, NULL, NULL, &adev->gds.gds_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, + AMDGPU_GEM_DOMAIN_GWS, 0, NULL, NULL, &adev->gds.gws_gfx_bo); if (r) return r; r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, + AMDGPU_GEM_DOMAIN_OA, 0, NULL, NULL, &adev->gds.oa_gfx_bo); if (r) return r; @@ -924,8 +1001,6 @@ static int gfx_v8_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); - gfx_v8_0_mec_fini(adev); return 0; @@ -1217,6 +1292,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); } + case CHIP_FIJI: case CHIP_TONGA: for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { @@ -1895,7 +1971,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, } /** - * gmc_v8_0_init_compute_vmid - gart enable + * gfx_v8_0_init_compute_vmid - gart enable * * @rdev: amdgpu_device pointer * @@ -1905,7 +1981,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, #define DEFAULT_SH_MEM_BASES (0x6000) #define FIRST_COMPUTE_VMID (8) #define LAST_COMPUTE_VMID (16) -static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev) +static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) { int i; uint32_t sh_mem_config; @@ -1965,6 +2041,23 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_FIJI: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 16; + adev->gfx.config.max_cu_per_sh = 16; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + 
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; + break; case CHIP_TONGA: adev->gfx.config.max_shader_engines = 4; adev->gfx.config.max_tile_pipes = 8; @@ -2059,7 +2152,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { /* Get memory bank mapping mode. */ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); @@ -2155,7 +2248,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - gmc_v8_0_init_compute_vmid(adev); + gfx_v8_0_init_compute_vmid(adev); mutex_lock(&adev->grbm_idx_mutex); /* @@ -2471,6 +2564,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); switch (adev->asic_type) { case CHIP_TONGA: + case CHIP_FIJI: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x0000002A); break; @@ -2978,7 +3072,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) sizeof(struct vi_mqd), PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - &ring->mqd_obj); + NULL, &ring->mqd_obj); if (r) { dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); return r; @@ -3112,7 +3206,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) /* enable the doorbell if requested */ if (use_doorbell) { - if (adev->asic_type == CHIP_CARRIZO) { + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_FIJI)) { WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, @@ -3836,6 +3931,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); + } /** @@ -3856,7 +3952,8 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; if (ring->adev->asic_type == CHIP_TOPAZ || - ring->adev->asic_type == CHIP_TONGA) + ring->adev->asic_type == CHIP_TONGA || + ring->adev->asic_type == CHIP_FIJI) /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */ return false; else { @@ -3875,49 +3972,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, return true; } -static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) +static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct amdgpu_device *adev = ring->adev; - u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; - - /* instruct DE to set a magic number */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(5))); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq[ring->idx]; + uint64_t addr = ring->fence_drv.gpu_addr; - /* let CE wait till condition satisfied */ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ - WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ - WAIT_REG_MEM_FUNCTION(3) | /* == */ - WAIT_REG_MEM_ENGINE(2))); /* ce */ - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3))); /* equal */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, 4); /* poll interval */ - /* instruct CE to reset wb of ce_sync to zero */ - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | - WRITE_DATA_DST_SEL(5) | - WR_CONFIRM)); - amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 0); -} - -static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr) -{ - int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + if (usepfp) { + /* sync CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | - WRITE_DATA_DST_SEL(0))); + WRITE_DATA_DST_SEL(0)) | + WR_CONFIRM); if (vm_id < 8) { amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); @@ -3953,9 +4035,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* sync PFP to ME, otherwise we might get invalid PFP reads */ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); amdgpu_ring_write(ring, 0x0); - - /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - gfx_v8_0_ce_sync_me(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); } } @@ 
-4249,6 +4332,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .is_lockup = gfx_v8_0_ring_is_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { @@ -4265,6 +4349,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .is_lockup = gfx_v8_0_ring_is_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 98e797775..8b60b8fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -522,17 +522,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = RREG32(mmVM_CONTEXT1_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, amdgpu_vm_block_size - 9); @@ -635,7 +629,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev) adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; /* base offset of vram pages */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { u64 tmp = RREG32(mmMC_VM_FB_OFFSET); tmp <<= 22; adev->vm_manager.vram_base_offset = tmp; @@ -840,7 +834,7 @@ static int gmc_v7_0_early_init(void *handle) gmc_v7_0_set_gart_funcs(adev); gmc_v7_0_set_irq_funcs(adev); - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); @@ -851,6 +845,13 @@ static int gmc_v7_0_early_init(void *handle) return 0; } +static int gmc_v7_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); +} + static int gmc_v7_0_sw_init(void *handle) { int r; @@ -956,7 +957,7 @@ static int gmc_v7_0_hw_init(void *handle) gmc_v7_0_mc_program(adev); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { r = gmc_v7_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -975,6 +976,7 @@ static int gmc_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->mc.vm_fault, 
0); gmc_v7_0_gart_disable(adev); return 0; @@ -1171,7 +1173,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { - if (!(adev->flags & AMDGPU_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } @@ -1259,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1266,8 +1274,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } @@ -1281,7 +1287,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle, if (state == AMD_CG_STATE_GATE) gate = true; - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { gmc_v7_0_enable_mc_mgcg(adev, gate); gmc_v7_0_enable_mc_ls(adev, gate); } @@ -1300,7 +1306,7 @@ static int gmc_v7_0_set_powergating_state(void *handle, const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .early_init = gmc_v7_0_early_init, - .late_init = NULL, + .late_init = gmc_v7_0_late_init, .sw_init = gmc_v7_0_sw_init, .sw_fini = gmc_v7_0_sw_fini, .hw_init = gmc_v7_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 4d17921b8..1f788f36c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -60,6 +60,19 @@ static const u32 tonga_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; +static const u32 golden_settings_fiji_a10[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + static const u32 golden_settings_iceland_a11[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, @@ -89,6 +102,14 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_iceland_a11, (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -201,6 +222,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_TONGA: chip_name = "tonga"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; case CHIP_CARRIZO: return 0; default: BUG(); @@ -627,19 +651,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = RREG32(mmVM_CONTEXT1_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, 
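The gmc_v7_0_process_interrupt() hunk above moves the VM_CONTEXT1_CNTL2 write-one-to-clear ahead of the fault decode and bails out when both the latched address and status read back as zero, so an already-serviced or spurious fault no longer produces a bogus dev_err() dump (gmc_v8_0 below gets the identical treatment). The handler shape as a sketch over a fake register file:

    #include <stdint.h>
    #include <stdio.h>

    struct fault_regs { uint32_t addr, status, cntl2; };

    /* Snapshot the latched fault, ack it immediately, then decide whether
     * there is anything real to report -- mirroring the reordering above. */
    static int handle_vm_fault(struct fault_regs *r)
    {
        uint32_t addr = r->addr;
        uint32_t status = r->status;

        r->cntl2 |= 1;          /* reset addr and status */

        if (!addr && !status)
            return 0;           /* spurious or already handled */

        fprintf(stderr, "GPU fault at 0x%08X, status 0x%08X\n",
                (unsigned)addr, (unsigned)status);
        return 0;
    }

    int main(void)
    {
        struct fault_regs r = { 0x1000, 0x2, 0 };
        return handle_vm_fault(&r);
    }
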
VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, amdgpu_vm_block_size - 9); @@ -736,7 +753,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev) adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; /* base offset of vram pages */ - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { u64 tmp = RREG32(mmMC_VM_FB_OFFSET); tmp <<= 22; adev->vm_manager.vram_base_offset = tmp; @@ -815,7 +832,7 @@ static int gmc_v8_0_early_init(void *handle) gmc_v8_0_set_gart_funcs(adev); gmc_v8_0_set_irq_funcs(adev); - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); @@ -826,6 +843,13 @@ static int gmc_v8_0_early_init(void *handle) return 0; } +static int gmc_v8_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); +} + static int gmc_v8_0_sw_init(void *handle) { int r; @@ -933,7 +957,7 @@ static int gmc_v8_0_hw_init(void *handle) gmc_v8_0_mc_program(adev); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { r = gmc_v8_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -952,6 +976,7 @@ static int gmc_v8_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); gmc_v8_0_gart_disable(adev); return 0; @@ -1146,7 +1171,7 @@ static int gmc_v8_0_soft_reset(void *handle) if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { - if (!(adev->flags & AMDGPU_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } @@ -1235,6 +1260,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, 
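In gmc_v8_0_vm_init() above (and its gmc_v7_0 twin earlier), the APU carve-out base is MC_VM_FB_OFFSET shifted left by 22, i.e. the shift implies the register counts in 4 MiB (1 << 22 byte) units. Worked through with a hypothetical readback value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical readback: MC_VM_FB_OFFSET = 0x300 -> 0x300 * 4 MiB. */
        uint32_t fb_offset = 0x300;
        uint64_t vram_base_offset = (uint64_t)fb_offset << 22;

        printf("vram_base_offset = 0x%llX (%llu MiB)\n",
               (unsigned long long)vram_base_offset,
               (unsigned long long)(vram_base_offset >> 20));
        return 0;
    }
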
entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1242,8 +1273,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } @@ -1262,7 +1291,7 @@ static int gmc_v8_0_set_powergating_state(void *handle, const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .early_init = gmc_v8_0_early_init, - .late_init = NULL, + .late_init = gmc_v8_0_late_init, .sw_init = gmc_v8_0_sw_init, .sw_fini = gmc_v8_0_sw_fini, .hw_init = gmc_v8_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h index c723602c7..ee6a041cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h @@ -2163,5 +2163,10 @@ #define SDMA_PKT_NOP_HEADER_sub_op_shift 8 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) +/*define for count field*/ +#define SDMA_PKT_NOP_HEADER_count_offset 0 +#define SDMA_PKT_NOP_HEADER_count_mask 0x00003FFF +#define SDMA_PKT_NOP_HEADER_count_shift 16 +#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift) #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index c6f1e2f12..966d4b2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -623,7 +623,9 @@ int iceland_smu_init(struct amdgpu_device *adev) /* Allocate FW image data structure and header buffer */ ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); + true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 9745ed3a9..7e9154c7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2997,6 +2997,9 @@ static int kv_dpm_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; + if (!amdgpu_dpm) + return 0; + /* init the sysfs and debugfs files late */ ret = amdgpu_pm_sysfs_init(adev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/amdgpu/pptable.h deleted file mode 100644 index 0030f726e..000000000 --- a/drivers/gpu/drm/amd/amdgpu/pptable.h +++ /dev/null @@ -1,698 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _PPTABLE_H -#define _PPTABLE_H - -#pragma pack(1) - -typedef struct _ATOM_PPLIB_THERMALCONTROLLER - -{ - UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* - UCHAR ucI2cLine; // as interpreted by DAL I2C - UCHAR ucI2cAddress; - UCHAR ucFanParameters; // Fan Control Parameters. - UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. - UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. - UCHAR ucReserved; // ---- - UCHAR ucFlags; // to be defined -} ATOM_PPLIB_THERMALCONTROLLER; - -#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f -#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. - -#define ATOM_PP_THERMALCONTROLLER_NONE 0 -#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_LM64 5 -#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib -#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 -#define ATOM_PP_THERMALCONTROLLER_RV770 8 -#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 -#define ATOM_PP_THERMALCONTROLLER_KONG 10 -#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 -#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 -#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. -#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally -#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 -#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 -#define ATOM_PP_THERMALCONTROLLER_LM96163 17 -#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18 -#define ATOM_PP_THERMALCONTROLLER_KAVERI 19 - - -// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. -// We probably should reserve the bit 0x80 for this use. -// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). -// The driver can pick the correct internal controller based on the ASIC. - -#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller -#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller - -typedef struct _ATOM_PPLIB_STATE -{ - UCHAR ucNonClockStateIndex; - UCHAR ucClockStateIndices[1]; // variable-sized -} ATOM_PPLIB_STATE; - - -typedef struct _ATOM_PPLIB_FANTABLE -{ - UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. - UCHAR ucTHyst; // Temperature hysteresis. Integer. - USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. - USHORT usTMed; // The middle temperature where we change slopes. 
- USHORT usTHigh; // The high point above TMed for adjusting the second slope. - USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). - USHORT usPWMMed; // The PWM value (in percent) at TMed. - USHORT usPWMHigh; // The PWM value at THigh. -} ATOM_PPLIB_FANTABLE; - -typedef struct _ATOM_PPLIB_FANTABLE2 -{ - ATOM_PPLIB_FANTABLE basicTable; - USHORT usTMax; // The max temperature -} ATOM_PPLIB_FANTABLE2; - -typedef struct _ATOM_PPLIB_FANTABLE3 -{ - ATOM_PPLIB_FANTABLE2 basicTable2; - UCHAR ucFanControlMode; - USHORT usFanPWMMax; - USHORT usFanOutputSensitivity; -} ATOM_PPLIB_FANTABLE3; - -typedef struct _ATOM_PPLIB_EXTENDEDHEADER -{ - USHORT usSize; - ULONG ulMaxEngineClock; // For Overdrive. - ULONG ulMaxMemoryClock; // For Overdrive. - // Add extra system parameters here, always adjust size to include all fields. - USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table - USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table - USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table - USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table - USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table - /* points to ATOM_PPLIB_POWERTUNE_Table */ - USHORT usPowerTuneTableOffset; - /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */ - USHORT usSclkVddgfxTableOffset; -} ATOM_PPLIB_EXTENDEDHEADER; - -//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps -#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 -#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 -#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4 -#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8 -#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16 -#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32 -#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64 -#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128 -#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256 -#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 -#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 -#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 -#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 -#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. -#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). -#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. -#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. -#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. -#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table. -#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity. -#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17. -#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable. -#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver supports Temp Inversion feature. 
-#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000 - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE -{ - ATOM_COMMON_TABLE_HEADER sHeader; - - UCHAR ucDataRevision; - - UCHAR ucNumStates; - UCHAR ucStateEntrySize; - UCHAR ucClockInfoSize; - UCHAR ucNonClockSize; - - // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures - USHORT usStateArrayOffset; - - // offset from start of this table to array of ASIC-specific structures, - // currently ATOM_PPLIB_CLOCK_INFO. - USHORT usClockInfoArrayOffset; - - // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO - USHORT usNonClockInfoArrayOffset; - - USHORT usBackbiasTime; // in microseconds - USHORT usVoltageTime; // in microseconds - USHORT usTableSize; //the size of this structure, or the extended structure - - ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_* - - ATOM_PPLIB_THERMALCONTROLLER sThermalController; - - USHORT usBootClockInfoOffset; - USHORT usBootNonClockInfoOffset; - -} ATOM_PPLIB_POWERPLAYTABLE; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 -{ - ATOM_PPLIB_POWERPLAYTABLE basicTable; - UCHAR ucNumCustomThermalPolicy; - USHORT usCustomThermalPolicyArrayOffset; -}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 -{ - ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; - USHORT usFormatID; // To be used ONLY by PPGen. - USHORT usFanTableOffset; - USHORT usExtendendedHeaderOffset; -} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 -{ - ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; - ULONG ulGoldenPPID; // PPGen use only - ULONG ulGoldenRevision; // PPGen use only - USHORT usVddcDependencyOnSCLKOffset; - USHORT usVddciDependencyOnMCLKOffset; - USHORT usVddcDependencyOnMCLKOffset; - USHORT usMaxClockVoltageOnDCOffset; - USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table - USHORT usMvddDependencyOnMCLKOffset; -} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; - -typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 -{ - ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; - ULONG ulTDPLimit; - ULONG ulNearTDPLimit; - ULONG ulSQRampingThreshold; - USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table - ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table - USHORT usTDPODLimit; - USHORT usLoadLineSlope; // in milliOhms * 100 -} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; - -//// ATOM_PPLIB_NONCLOCK_INFO::usClassification -#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 -#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 -#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 -#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 -#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 -#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 -// 2, 4, 6, 7 are reserved - -#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 -#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 -#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 -#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 -#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 -#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 -#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 -#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 -#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 -#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 -#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 -#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 -#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 - -//// 
ATOM_PPLIB_NONCLOCK_INFO::usClassification2 -#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 -#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 -#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D) - -//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings -#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 -#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 - -// 0 is 2.5Gb/s, 1 is 5Gb/s -#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 -#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 - -// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec -#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8 -#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 - -// lookup into reduced refresh-rate table -#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 -#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 - -#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 -#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 -// 2-15 TBD as needed. - -#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 -#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 - -#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 - -#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 - -//memory related flags -#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 - -//M3 Arb //2 bits, currently 3 sets of parameters in total -#define ATOM_PPLIB_M3ARB_MASK 0x00060000 -#define ATOM_PPLIB_M3ARB_SHIFT 17 - -#define ATOM_PPLIB_ENABLE_DRR 0x00080000 - -// remaining 16 bits are reserved -typedef struct _ATOM_PPLIB_THERMAL_STATE -{ - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucThermalAction; -}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; - -// Contained in an array starting at the offset -// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. -// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex -#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 -#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 -typedef struct _ATOM_PPLIB_NONCLOCK_INFO -{ - USHORT usClassification; - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - ULONG ulCapsAndSettings; - UCHAR ucRequiredPower; - USHORT usClassification2; - ULONG ulVCLK; - ULONG ulDCLK; - UCHAR ucUnused[5]; -} ATOM_PPLIB_NONCLOCK_INFO; - -// Contained in an array starting at the offset -// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. -// referenced from ATOM_PPLIB_STATE::ucClockStateIndices -typedef struct _ATOM_PPLIB_R600_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usUnused1; - USHORT usUnused2; - - ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* - -} ATOM_PPLIB_R600_CLOCK_INFO; - -// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO -#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1 -#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2 -#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 -#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 -#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 -#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). - -typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO - -{ - USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600). - UCHAR ucLowEngineClockHigh; - USHORT usHighEngineClockLow; // High Engine clock in MHz. - UCHAR ucHighEngineClockHigh; - USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants. - UCHAR ucMemoryClockHigh; // Currently unused. - UCHAR ucPadding; // For proper alignment and size.
- USHORT usVDDC; // For the 780, use: None, Low, High, Variable - UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} - UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could - USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz (>=200). - ULONG ulFlags; -} ATOM_PPLIB_RS780_CLOCK_INFO; - -#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 -#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 -#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 -#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 - -#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. -#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 -#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 - -#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 -#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 -#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 - -typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usVDDCI; - USHORT usUnused; - - ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* - -} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_SI_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - USHORT usVDDC; - USHORT usVDDCI; - UCHAR ucPCIEGen; - UCHAR ucUnused1; - - ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now - -} ATOM_PPLIB_SI_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_CI_CLOCK_INFO -{ - USHORT usEngineClockLow; - UCHAR ucEngineClockHigh; - - USHORT usMemoryClockLow; - UCHAR ucMemoryClockHigh; - - UCHAR ucPCIEGen; - USHORT usPCIELane; -} ATOM_PPLIB_CI_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ - USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10 kHz - UCHAR ucEngineClockHigh; //clockfrequency >> 16. - UCHAR vddcIndex; //2-bit vddc index; - USHORT tdpLimit; - //please initialize to 0 - USHORT rsv1; - //please initialize to 0s - ULONG rsv2[2]; -}ATOM_PPLIB_SUMO_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO { - UCHAR index; - UCHAR rsv[3]; -} ATOM_PPLIB_CZ_CLOCK_INFO; - -typedef struct _ATOM_PPLIB_STATE_V2 -{ - //number of valid dpm levels in this state; Driver uses it to calculate the whole - //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) - UCHAR ucNumDPMLevels; - - //an index to the array of nonClockInfos - UCHAR nonClockInfoIndex; - /** - * Driver will read the first ucNumDPMLevels in this array - */ - UCHAR clockInfoIndex[1]; -} ATOM_PPLIB_STATE_V2; - -typedef struct _StateArray{ - //how many states we have - UCHAR ucNumEntries; - - ATOM_PPLIB_STATE_V2 states[1]; -}StateArray; - - -typedef struct _ClockInfoArray{ - //how many clock levels we have - UCHAR ucNumEntries; - - //sizeof(ATOM_PPLIB_CLOCK_INFO) - UCHAR ucEntrySize; - - UCHAR clockInfo[1]; -}ClockInfoArray; - -typedef struct _NonClockInfoArray{ - - //how many non-clock levels we have; normally the same as the number of states - UCHAR ucNumEntries; - //sizeof(ATOM_PPLIB_NONCLOCK_INFO) - UCHAR ucEntrySize; - - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; -}NonClockInfoArray; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record -{ - USHORT usClockLow; - UCHAR ucClockHigh; - USHORT usVoltage; -}ATOM_PPLIB_Clock_Voltage_Dependency_Record; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table -{ - UCHAR ucNumEntries; // Number of entries.
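/* Note on the entries[1] members used by the tables here: this is the
 * pre-C99 variable-length-array idiom. The VBIOS image lays out
 * ucNumEntries records immediately after the count byte, and callers
 * index past the single declared element, so sizeof() on these structs
 * does not give the full table size. */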
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_Clock_Voltage_Dependency_Table; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record -{ - USHORT usSclkLow; - UCHAR ucSclkHigh; - USHORT usMclkLow; - UCHAR ucMclkHigh; - USHORT usVddc; - USHORT usVddci; -}ATOM_PPLIB_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_Clock_Voltage_Limit_Table; - -union _ATOM_PPLIB_CAC_Leakage_Record -{ - struct - { - USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd - ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd - - }; - struct - { - USHORT usVddc1; - USHORT usVddc2; - USHORT usVddc3; - }; -}; - -typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record; - -typedef struct _ATOM_PPLIB_CAC_Leakage_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_CAC_Leakage_Table; - -typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record -{ - USHORT usVoltage; - USHORT usSclkLow; - UCHAR ucSclkHigh; - USHORT usMclkLow; - UCHAR ucMclkHigh; -}ATOM_PPLIB_PhaseSheddingLimits_Record; - -typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table -{ - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. -}ATOM_PPLIB_PhaseSheddingLimits_Table; - -typedef struct _VCEClockInfo{ - USHORT usEVClkLow; - UCHAR ucEVClkHigh; - USHORT usECClkLow; - UCHAR ucECClkHigh; -}VCEClockInfo; - -typedef struct _VCEClockInfoArray{ - UCHAR ucNumEntries; - VCEClockInfo entries[1]; -}VCEClockInfoArray; - -typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record -{ - USHORT usVoltage; - UCHAR ucVCEClockInfoIndex; -}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table -{ - UCHAR numEntries; - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_VCE_State_Record -{ - UCHAR ucVCEClockInfoIndex; - UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray -}ATOM_PPLIB_VCE_State_Record; - -typedef struct _ATOM_PPLIB_VCE_State_Table -{ - UCHAR numEntries; - ATOM_PPLIB_VCE_State_Record entries[1]; -}ATOM_PPLIB_VCE_State_Table; - - -typedef struct _ATOM_PPLIB_VCE_Table -{ - UCHAR revid; -// VCEClockInfoArray array; -// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits; -// ATOM_PPLIB_VCE_State_Table states; -}ATOM_PPLIB_VCE_Table; - - -typedef struct _UVDClockInfo{ - USHORT usVClkLow; - UCHAR ucVClkHigh; - USHORT usDClkLow; - UCHAR ucDClkHigh; -}UVDClockInfo; - -typedef struct _UVDClockInfoArray{ - UCHAR ucNumEntries; - UVDClockInfo entries[1]; -}UVDClockInfoArray; - -typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record -{ - USHORT usVoltage; - UCHAR ucUVDClockInfoIndex; -}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table -{ - UCHAR numEntries; - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_UVD_Table -{ - UCHAR revid; -// 
UVDClockInfoArray array; -// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits; -}ATOM_PPLIB_UVD_Table; - -typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record -{ - USHORT usVoltage; - USHORT usSAMClockLow; - UCHAR ucSAMClockHigh; -}ATOM_PPLIB_SAMClk_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_SAMClk_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_SAMU_Table -{ - UCHAR revid; - ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits; -}ATOM_PPLIB_SAMU_Table; - -typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record -{ - USHORT usVoltage; - USHORT usACPClockLow; - UCHAR ucACPClockHigh; -}ATOM_PPLIB_ACPClk_Voltage_Limit_Record; - -typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; -}ATOM_PPLIB_ACPClk_Voltage_Limit_Table; - -typedef struct _ATOM_PPLIB_ACP_Table -{ - UCHAR revid; - ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits; -}ATOM_PPLIB_ACP_Table; - -typedef struct _ATOM_PowerTune_Table{ - USHORT usTDP; - USHORT usConfigurableTDP; - USHORT usTDC; - USHORT usBatteryPowerLimit; - USHORT usSmallPowerLimit; - USHORT usLowCACLeakage; - USHORT usHighCACLeakage; -}ATOM_PowerTune_Table; - -typedef struct _ATOM_PPLIB_POWERTUNE_Table -{ - UCHAR revid; - ATOM_PowerTune_Table power_tune_table; -}ATOM_PPLIB_POWERTUNE_Table; - -typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1 -{ - UCHAR revid; - ATOM_PowerTune_Table power_tune_table; - USHORT usMaximumPowerDeliveryLimit; - USHORT usReserve[7]; -} ATOM_PPLIB_POWERTUNE_Table_V1; - -#define ATOM_PPM_A_A 1 -#define ATOM_PPM_A_I 2 -typedef struct _ATOM_PPLIB_PPM_Table -{ - UCHAR ucRevId; - UCHAR ucPpmDesign; //A+I or A+A - USHORT usCpuCoreNumber; - ULONG ulPlatformTDP; - ULONG ulSmallACPlatformTDP; - ULONG ulPlatformTDC; - ULONG ulSmallACPlatformTDC; - ULONG ulApuTDP; - ULONG ulDGpuTDP; - ULONG ulDGpuUlvPower; - ULONG ulTjmax; -} ATOM_PPLIB_PPM_Table; - -#pragma pack() - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index d5433249a..b8cbd2af2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -145,6 +145,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); + if (adev->sdma[i].feature_version >= 20) + adev->sdma[i].burst_nop = true; if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -217,6 +219,19 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); } +static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->nop | + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->nop); +} + /** * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine * @@ -244,8 +259,8 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ - while ((ring->wptr & 7) != 2) - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); + 
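	/* Why (10 - (wptr & 7)) % 8: assuming the INDIRECT packet emitted
	 * just below is 6 dwords, padding the write pointer to 2 mod 8 makes
	 * that packet end exactly on the required 8-dword boundary, and
	 * insert_nop() can then cover the whole pad with a single burst NOP
	 * (header count = pad - 1) on firmware that supports it. */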
sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); /* base must be 32 byte aligned */ @@ -672,6 +687,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -687,12 +703,11 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - + memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -706,19 +721,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; + + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -728,12 +740,17 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; } + +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -876,8 +893,19 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, */ static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) { - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); + u32 pad_count; + int i; + + pad_count = (8 - (ib->length_dw & 0x7)) % 8; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP) | + SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); + else + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP); } /** @@ -1311,6 +1339,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { .test_ring = sdma_v2_4_ring_test_ring, .test_ib = sdma_v2_4_ring_test_ib, .is_lockup = sdma_v2_4_ring_is_lockup, + .insert_nop = sdma_v2_4_ring_insert_nop, }; static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) @@ -1347,19 +1376,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. 
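 * With this change the callback takes an amdgpu_ib rather than a ring
 * and appends the COPY_LINEAR packet via ib->ptr[ib->length_dw++], so
 * the copy is submitted through the scheduler like any other IB instead
 * of being written to the ring directly.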
*/ -static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring, +static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); - amdgpu_ring_write(ring, byte_count); - amdgpu_ring_write(ring, 0); /* src/dst endian swap */ - amdgpu_ring_write(ring, lower_32_bits(src_offset)); - amdgpu_ring_write(ring, upper_32_bits(src_offset)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = byte_count; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); } /** @@ -1372,16 +1401,16 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring, * * Fill GPU buffers using the DMA engine (VI). */ -static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring, +static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); - amdgpu_ring_write(ring, src_data); - amdgpu_ring_write(ring, byte_count); + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = byte_count; } static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { @@ -1414,5 +1443,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 202594086..add25abf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -77,6 +77,24 @@ static const u32 tonga_mgcg_cgcg_init[] = mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 }; +static const u32 golden_settings_fiji_a10[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, +}; + +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 +}; + static const u32 cz_golden_settings_a11[] = { mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, @@ -119,6 +137,14 @@ static const u32 cz_mgcg_cgcg_init[] = static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const 
u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_fiji_a10, + (const u32)ARRAY_SIZE(golden_settings_fiji_a10)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -164,6 +190,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_TONGA: chip_name = "tonga"; break; + case CHIP_FIJI: + chip_name = "fiji"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; @@ -184,6 +213,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); + if (adev->sdma[i].feature_version >= 20) + adev->sdma[i].burst_nop = true; if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -270,6 +301,19 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) } } +static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->nop | + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->nop); +} + /** * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine * @@ -296,8 +340,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ - while ((ring->wptr & 7) != 2) - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); + sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); @@ -760,6 +803,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; + struct fence *f = NULL; unsigned i; unsigned index; int r; @@ -775,12 +819,11 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) gpu_addr = adev->wb.gpu_addr + (index * 4); tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); - + memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(ring, NULL, 256, &ib); if (r) { - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); - return r; + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -794,19 +837,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); - DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); - return r; - } - r = amdgpu_fence_wait(ib.fence, false); + r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL, + AMDGPU_FENCE_OWNER_UNDEFINED, + &f); + if (r) + goto err1; + + r = fence_wait(f, false); if (r) { - amdgpu_ib_free(adev, &ib); - amdgpu_wb_free(adev, index); DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - return r; + goto err1; } for (i = 0; i < adev->usec_timeout; i++) { tmp = le32_to_cpu(adev->wb.wb[index]); @@ -816,12 +856,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { DRM_INFO("ib test on ring %d succeeded in %u usecs\n", - ib.fence->ring->idx, i); + ring->idx, i); + goto err1; } else { DRM_ERROR("amdgpu: ib test failed 
(0x%08X)\n", tmp); r = -EINVAL; } +err1: + fence_put(f); amdgpu_ib_free(adev, &ib); +err0: amdgpu_wb_free(adev, index); return r; } @@ -964,8 +1008,19 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, */ static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) { - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); + struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); + u32 pad_count; + int i; + + pad_count = (8 - (ib->length_dw & 0x7)) % 8; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP) | + SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); + else + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP); } /** @@ -1403,6 +1458,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { .test_ring = sdma_v3_0_ring_test_ring, .test_ib = sdma_v3_0_ring_test_ib, .is_lockup = sdma_v3_0_ring_is_lockup, + .insert_nop = sdma_v3_0_ring_insert_nop, }; static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) @@ -1439,19 +1495,19 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) * Used by the amdgpu ttm implementation to move pages if * registered as the asic copy callback. */ -static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring, +static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); - amdgpu_ring_write(ring, byte_count); - amdgpu_ring_write(ring, 0); /* src/dst endian swap */ - amdgpu_ring_write(ring, lower_32_bits(src_offset)); - amdgpu_ring_write(ring, upper_32_bits(src_offset)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = byte_count; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); } /** @@ -1464,16 +1520,16 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring, * * Fill GPU buffers using the DMA engine (VI). 
*/ -static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring, +static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib, uint32_t src_data, uint64_t dst_offset, uint32_t byte_count) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); - amdgpu_ring_write(ring, lower_32_bits(dst_offset)); - amdgpu_ring_write(ring, upper_32_bits(dst_offset)); - amdgpu_ring_write(ring, src_data); - amdgpu_ring_write(ring, byte_count); + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = byte_count; } static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { @@ -1506,5 +1562,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h index 099b7b561..e5ebd0842 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h @@ -2236,5 +2236,10 @@ #define SDMA_PKT_NOP_HEADER_sub_op_shift 8 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) +/*define for count field*/ +#define SDMA_PKT_NOP_HEADER_count_offset 0 +#define SDMA_PKT_NOP_HEADER_count_mask 0x00003FFF +#define SDMA_PKT_NOP_HEADER_count_shift 16 +#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift) #endif /* __TONGA_SDMA_PKT_OPEN_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 5fc53a40c..5421309c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -761,7 +761,9 @@ int tonga_smu_init(struct amdgpu_device *adev) /* Allocate FW image data structure and header buffer */ ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); + true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, toc_buf); if (ret) { DRM_ERROR("Failed to allocate memory for TOC buffer\n"); return -ENOMEM; @@ -769,7 +771,9 @@ int tonga_smu_init(struct amdgpu_device *adev) /* Allocate buffer for SMU internal buffer */ ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf); + true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, smu_buf); if (ret) { DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 9488ea6ea..ed50dd725 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); @@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = 
fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); amdgpu_asic_set_uvd_clocks(adev, 0, 0); return r; } @@ -886,6 +886,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .test_ring = uvd_v4_2_ring_test_ring, .test_ib = uvd_v4_2_ring_test_ib, .is_lockup = amdgpu_ring_test_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index d0ed99822..9ad8b9906 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); @@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); amdgpu_asic_set_uvd_clocks(adev, 0, 0); return r; } @@ -825,6 +825,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .test_ring = uvd_v5_0_ring_test_ring, .test_ib = uvd_v5_0_ring_test_ib, .is_lockup = amdgpu_ring_test_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 345eb760f..7e9934fa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -214,10 +214,12 @@ static int uvd_v6_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_suspend(adev); - if (r) - return r; - + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + } r = uvd_v6_0_hw_fini(adev); if (r) return r; @@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - + /* Skip this for APU for now */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_uvd_resume(adev); + if (r) + return r; + } r = uvd_v6_0_hw_init(adev); if (r) return r; @@ -575,7 +579,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, */ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) { - struct amdgpu_fence *fence = NULL; + struct fence *fence = NULL; int r; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); @@ -590,14 +594,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) goto error; } - r = amdgpu_fence_wait(fence, false); + r = fence_wait(fence, false); if (r) { DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); goto error; } DRM_INFO("ib test on ring %d succeeded\n", ring->idx); error: - amdgpu_fence_unref(&fence); + fence_put(fence); return r; } @@ -805,6 +809,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { .test_ring = uvd_v6_0_ring_test_ring, .test_ib = uvd_v6_0_ring_test_ib, .is_lockup = amdgpu_ring_test_lockup, + .insert_nop = 
amdgpu_ring_insert_nop, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 303d961d5..cd16df543 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -643,6 +643,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .is_lockup = amdgpu_ring_test_lockup, + .insert_nop = amdgpu_ring_insert_nop, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d1064ca36..f0656dfb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -32,8 +32,8 @@ #include "vid.h" #include "vce/vce_3_0_d.h" #include "vce/vce_3_0_sh_mask.h" -#include "oss/oss_2_0_d.h" -#include "oss/oss_2_0_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" @@ -205,7 +205,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) u32 tmp; unsigned ret; - if (adev->flags & AMDGPU_IS_APU) + /* Fiji is single pipe */ + if (adev->asic_type == CHIP_FIJI) { + ret = AMDGPU_VCE_HARVEST_VCE1; + return ret; + } + + /* Tonga and CZ are dual or single pipe */ + if (adev->flags & AMD_IS_APU) tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & VCE_HARVEST_FUSE_MACRO__MASK) >> VCE_HARVEST_FUSE_MACRO__SHIFT; @@ -419,17 +426,41 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) static bool vce_v3_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 mask = 0; + int idx; + + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + mask |= SRBM_STATUS2__VCE0_BUSY_MASK; + else + mask |= SRBM_STATUS2__VCE1_BUSY_MASK; + } - return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); + return !(RREG32(mmSRBM_STATUS2) & mask); } static int vce_v3_0_wait_for_idle(void *handle) { unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 mask = 0; + int idx; + + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + mask |= SRBM_STATUS2__VCE0_BUSY_MASK; + else + mask |= SRBM_STATUS2__VCE1_BUSY_MASK; + } for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + if (!(RREG32(mmSRBM_STATUS2) & mask)) return 0; } return -ETIMEDOUT; @@ -438,9 +469,21 @@ static int vce_v3_0_wait_for_idle(void *handle) static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 mask = 0; + int idx; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + for (idx = 0; idx < 2; ++idx) { + if (adev->vce.harvest_config & (1 << idx)) + continue; + + if (idx == 0) + mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; + else + mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; + } + WREG32_P(mmSRBM_SOFT_RESET, mask, + ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | + SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); mdelay(5); return vce_v3_0_start(adev); @@ -601,6 +644,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .test_ring = amdgpu_vce_ring_test_ring, .test_ib = amdgpu_vce_ring_test_ib, .is_lockup = amdgpu_ring_test_lockup, + 
.insert_nop = amdgpu_ring_insert_nop, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9ffa56ceb..0bac8702e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -203,6 +203,17 @@ static const u32 tonga_mgcg_cgcg_init[] = mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, }; +static const u32 fiji_mgcg_cgcg_init[] = +{ + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, +}; + static const u32 iceland_mgcg_cgcg_init[] = { mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, @@ -232,6 +243,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) iceland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); break; + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init)); + break; case CHIP_TONGA: amdgpu_program_register_sequence(adev, tonga_mgcg_cgcg_init, @@ -261,7 +277,7 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return reference_clock; tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); @@ -362,6 +378,26 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = { static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { {mmGRBM_STATUS, false}, + {mmGRBM_STATUS2, false}, + {mmGRBM_STATUS_SE0, false}, + {mmGRBM_STATUS_SE1, false}, + {mmGRBM_STATUS_SE2, false}, + {mmGRBM_STATUS_SE3, false}, + {mmSRBM_STATUS, false}, + {mmSRBM_STATUS2, false}, + {mmSRBM_STATUS3, false}, + {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false}, + {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false}, + {mmCP_STAT, false}, + {mmCP_STALLED_STAT1, false}, + {mmCP_STALLED_STAT2, false}, + {mmCP_STALLED_STAT3, false}, + {mmCP_CPF_BUSY_STAT, false}, + {mmCP_CPF_STALLED_STAT1, false}, + {mmCP_CPF_STATUS, false}, + {mmCP_CPC_BUSY_STAT, false}, + {mmCP_CPC_STALLED_STAT1, false}, + {mmCP_CPC_STATUS, false}, {mmGB_ADDR_CONFIG, false}, {mmMC_ARB_RAMCFG, false}, {mmGB_TILE_MODE0, false}, @@ -449,6 +485,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, asic_register_table = tonga_allowed_read_registers; size = ARRAY_SIZE(tonga_allowed_read_registers); break; + case CHIP_FIJI: case CHIP_TONGA: case CHIP_CARRIZO: asic_register_table = cz_allowed_read_registers; @@ -751,7 +788,7 @@ static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); - if (!(adev->flags & AMDGPU_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) { if (reset_mask & AMDGPU_RESET_MC) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); @@ -974,7 +1011,7 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) if (amdgpu_pcie_gen2 == 0) return; - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); @@ -1002,7 +1039,7 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, u32 tmp; /* not necessary on CZ */ - if (adev->flags & AMDGPU_IS_APU) + if (adev->flags & AMD_IS_APU) return; tmp = 
RREG32(mmBIF_DOORBELL_APER_EN); @@ -1130,6 +1167,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &fiji_dpm_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_v10_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1205,6 +1310,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) adev->ip_blocks = topaz_ip_blocks; adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; case CHIP_TONGA: adev->ip_blocks = tonga_ip_blocks; adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); @@ -1251,7 +1360,7 @@ static int vi_common_early_init(void *handle) bool smc_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->flags & AMDGPU_IS_APU) { + if (adev->flags & AMD_IS_APU) { adev->smc_rreg = &cz_smc_rreg; adev->smc_wreg = &cz_smc_wreg; } else { @@ -1282,6 +1391,7 @@ static int vi_common_early_init(void *handle) if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; break; + case CHIP_FIJI: case CHIP_TONGA: adev->has_uvd = true; adev->cg_flags = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h index 3b45332f5..fc120ba18 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h @@ -30,7 +30,7 @@ int cz_smu_start(struct amdgpu_device *adev); int cz_smu_fini(struct amdgpu_device *adev); extern const struct amd_ip_funcs tonga_dpm_ip_funcs; - +extern const struct amd_ip_funcs fiji_dpm_ip_funcs; extern const struct amd_ip_funcs iceland_dpm_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 31bb89452..d98aa9d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -66,6 +66,11 @@ #define AMDGPU_NUM_OF_VMIDS 8 +#define PIPEID(x) ((x) << 0) +#define MEID(x) ((x) << 2) +#define VMID(x) ((x) << 4) +#define QUEUEID(x) ((x) << 8) + #define RB_BITMAP_WIDTH_PER_SH 2 #define MC_SEQ_MISC0__MT__MASK 0xf0000000 diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 8dfac37ff..e13c67c8d 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -4,6 +4,6 @@ config HSA_AMD tristate "HSA kernel driver for AMD GPU devices" - depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64 + depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64 help 
Enable this if you want to use HSA features on AMD GPU devices. diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 28551153e..7fc9b0f44 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -2,7 +2,8 @@ # Makefile for Heterogenous System Architecture support for AMD GPU devices # -ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ \ + -Idrivers/gpu/drm/amd/include/asic_reg amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h index 183be5b84..48769d12d 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h +++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h @@ -65,17 +65,6 @@ #define AQL_ENABLE 1 -#define SDMA_RB_VMID(x) (x << 24) -#define SDMA_RB_ENABLE (1 << 0) -#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */ -#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12) -#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ -#define SDMA_OFFSET(x) (x << 0) -#define SDMA_DB_ENABLE (1 << 28) -#define SDMA_ATC (1 << 0) -#define SDMA_VA_PTR32 (1 << 4) -#define SDMA_VA_SHARED_BASE(x) (x << 8) - #define GRBM_GFX_INDEX 0x30800 #define ATC_VMID_PASID_MAPPING_VALID (1U << 31) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index c99197301..c6a1b4cc6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include "kfd_priv.h" #include "kfd_device_queue_manager.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 75312c829..3f95f7cb4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -80,7 +80,12 @@ static const struct kfd_deviceid supported_devices[] = { { 0x1318, &kaveri_device_info }, /* Kaveri */ { 0x131B, &kaveri_device_info }, /* Kaveri */ { 0x131C, &kaveri_device_info }, /* Kaveri */ - { 0x131D, &kaveri_device_info } /* Kaveri */ + { 0x131D, &kaveri_device_info }, /* Kaveri */ + { 0x9870, &carrizo_device_info }, /* Carrizo */ + { 0x9874, &carrizo_device_info }, /* Carrizo */ + { 0x9875, &carrizo_device_info }, /* Carrizo */ + { 0x9876, &carrizo_device_info }, /* Carrizo */ + { 0x9877, &carrizo_device_info } /* Carrizo */ }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index 9ce8a20a7..c6f435aa8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -23,6 +23,7 @@ #include "kfd_device_queue_manager.h" #include "cik_regs.h" +#include "oss/oss_2_4_sh_mask.h" static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, struct qcm_process_device *qpd, @@ -135,13 +136,16 @@ static int register_process_cik(struct device_queue_manager *dqm, static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) { - uint32_t value = SDMA_ATC; + uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT); if (q->process->is_32bit_user_mode) - value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd)); + value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) | + 
get_sh_mem_bases_32(qpd_to_pdd(qpd)); else - value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64( - qpd_to_pdd(qpd))); + value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; + q->properties.sdma_vm_addr = value; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index 4c15212a3..7e9cae9d3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -22,6 +22,10 @@ */ #include "kfd_device_queue_manager.h" +#include "gca/gfx_8_0_enum.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "gca/gfx_8_0_enum.h" +#include "oss/oss_3_0_sh_mask.h" static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd, @@ -37,14 +41,40 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops) { - pr_warn("amdkfd: VI DQM is not currently supported\n"); - ops->set_cache_memory_policy = set_cache_memory_policy_vi; ops->register_process = register_process_vi; ops->initialize = initialize_cpsch_vi; ops->init_sdma_vm = init_sdma_vm; } +static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) +{ + /* In 64-bit mode, we can only control the top 3 bits of the LDS, + * scratch and GPUVM apertures. + * The hardware fills in the remaining 59 bits according to the + * following pattern: + * LDS: X0000000'00000000 - X0000001'00000000 (4GB) + * Scratch: X0000001'00000000 - X0000002'00000000 (4GB) + * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB) + * + * (where X/Y is the configurable nybble with the low-bit 0) + * + * LDS and scratch will have the same top nybble programmed in the + * top 3 bits of SH_MEM_BASES.PRIVATE_BASE. + * GPUVM can have a different top nybble programmed in the + * top 3 bits of SH_MEM_BASES.SHARED_BASE. + * We don't bother to support different top nybbles + * for LDS/Scratch and GPUVM. + */ + + BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || + top_address_nybble == 0); + + return top_address_nybble << 12 | + (top_address_nybble << 12) << + SH_MEM_BASES__SHARED_BASE__SHIFT; +} + static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, @@ -52,18 +82,83 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, void __user *alternate_aperture_base, uint64_t alternate_aperture_size) { - return false; + uint32_t default_mtype; + uint32_t ape1_mtype; + + default_mtype = (default_policy == cache_policy_coherent) ? + MTYPE_CC : + MTYPE_NC; + + ape1_mtype = (alternate_policy == cache_policy_coherent) ? 
+ MTYPE_CC : + MTYPE_NC; + + qpd->sh_mem_config = (qpd->sh_mem_config & + SH_MEM_CONFIG__ADDRESS_MODE_MASK) | + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | + default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT | + SH_MEM_CONFIG__PRIVATE_ATC_MASK; + + return true; } static int register_process_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - return -1; + struct kfd_process_device *pdd; + unsigned int temp; + + BUG_ON(!dqm || !qpd); + + pdd = qpd_to_pdd(qpd); + + /* check if sh_mem_config register already configured */ + if (qpd->sh_mem_config == 0) { + qpd->sh_mem_config = + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | + MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT | + SH_MEM_CONFIG__PRIVATE_ATC_MASK; + + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + } + + if (qpd->pqm->process->is_32bit_user_mode) { + temp = get_sh_mem_bases_32(pdd); + qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT; + qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 << + SH_MEM_CONFIG__ADDRESS_MODE__SHIFT; + } else { + temp = get_sh_mem_bases_nybble_64(pdd); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 << + SH_MEM_CONFIG__ADDRESS_MODE__SHIFT; + } + + pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", + qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); + + return 0; } static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) { + uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT); + + if (q->process->is_32bit_user_mode) + value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) | + get_sh_mem_bases_32(qpd_to_pdd(qpd)); + else + value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; + + q->properties.sdma_vm_addr = value; } static int initialize_cpsch_vi(struct device_queue_manager *dqm) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 35b987574..2b655103b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -33,7 +33,7 @@ #include #include "kfd_priv.h" #include -#include +#include #include /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 434979428..d83de985e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -27,6 +27,7 @@ #include "kfd_mqd_manager.h" #include "cik_regs.h" #include "cik_structs.h" +#include "oss/oss_2_4_sh_mask.h" static inline struct cik_mqd *get_mqd(void *mqd) { @@ -214,17 +215,20 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, BUG_ON(!mm || !mqd || !q); m = get_sdma_mqd(mqd); - m->sdma_rlc_rb_cntl = - SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) | - SDMA_RB_VMID(q->vmid) | - SDMA_RPTR_WRITEBACK_ENABLE | - SDMA_RPTR_WRITEBACK_TIMER(6); + m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << + SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | + q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | + 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | + 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; m->sdma_rlc_rb_base = 
lower_32_bits(q->queue_address >> 8); m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8); m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); - m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE; + m->sdma_rlc_doorbell = q->doorbell_off << + SDMA0_RLC0_DOORBELL__OFFSET__SHIFT | + 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT; + m->sdma_rlc_virtual_addr = q->sdma_vm_addr; m->sdma_engine_id = q->sdma_engine_id; @@ -234,7 +238,9 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, if (q->queue_size > 0 && q->queue_address != 0 && q->queue_percent > 0) { - m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE; + m->sdma_rlc_rb_cntl |= + 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT; + q->is_active = true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index b3a7e3ba1..fa32c32fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -22,12 +22,255 @@ */ #include +#include #include "kfd_priv.h" #include "kfd_mqd_manager.h" +#include "vi_structs.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "gca/gfx_8_0_enum.h" + +#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8 + +static inline struct vi_mqd *get_mqd(void *mqd) +{ + return (struct vi_mqd *)mqd; +} + +static int init_mqd(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + int retval; + uint64_t addr; + struct vi_mqd *m; + + retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd), + mqd_mem_obj); + if (retval != 0) + return -ENOMEM; + + m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr; + addr = (*mqd_mem_obj)->gpu_addr; + + memset(m, 0, sizeof(struct vi_mqd)); + + m->header = 0xC0310800; + m->compute_pipelinestat_enable = 1; + m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; + + m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | + 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + + m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT | + MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT; + + m->cp_mqd_base_addr_lo = lower_32_bits(addr); + m->cp_mqd_base_addr_hi = upper_32_bits(addr); + + m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT | + 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | + 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + + m->cp_hqd_pipe_priority = 1; + m->cp_hqd_queue_priority = 15; + + m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT; + + if (q->format == KFD_QUEUE_FORMAT_AQL) + m->cp_hqd_iq_rptr = 1; + + *mqd = m; + if (gart_addr != NULL) + *gart_addr = addr; + retval = mm->update_mqd(mm, m, q); + + return retval; +} + +static int load_mqd(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr) +{ + return mm->dev->kfd2kgd->hqd_load + (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); +} + +static int __update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, unsigned int mtype, + unsigned int atc_bit) +{ + struct vi_mqd *m; + + BUG_ON(!mm || !q || !mqd); + + pr_debug("kfd: In func %s\n", __func__); + + m = get_mqd(mqd); + + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT | + atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT | + mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT; 
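	/* The |= below ORs in the ring size: ffs() of the dword count minus
	 * one is log2 of the size in dwords, and the second - 1 matches a
	 * 2^(n+1)-dwords hardware encoding, so a 1024-dword ring is stored
	 * as 9. That reading of CP_HQD_PQ_CONTROL is an assumption, not
	 * something this patch states. */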
+ m->cp_hqd_pq_control |= + ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; + pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); + + m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); + m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); + + m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + + m->cp_hqd_pq_doorbell_control = + 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT | + q->doorbell_off << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n", + m->cp_hqd_pq_doorbell_control); + + m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT | + mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT; + + m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT | + 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT | + mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT; + + m->cp_hqd_eop_control |= + ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1; + m->cp_hqd_eop_base_addr_lo = + lower_32_bits(q->eop_ring_buffer_address >> 8); + m->cp_hqd_eop_base_addr_hi = + upper_32_bits(q->eop_ring_buffer_address >> 8); + + m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT | + mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT; + + m->cp_hqd_vmid = q->vmid; + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | + 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; + } + + m->cp_hqd_active = 0; + q->is_active = false; + if (q->queue_size > 0 && + q->queue_address != 0 && + q->queue_percent > 0) { + m->cp_hqd_active = 1; + q->is_active = true; + } + + return 0; +} + + +static int update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + return __update_mqd(mm, mqd, q, MTYPE_CC, 1); +} + +static int destroy_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_destroy + (mm->dev->kgd, type, timeout, + pipe_id, queue_id); +} + +static void uninit_mqd(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) +{ + BUG_ON(!mm || !mqd); + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); +} + +static bool is_occupied(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_is_occupied( + mm->dev->kgd, queue_address, + pipe_id, queue_id); +} + +static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct vi_mqd *m; + int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); + + if (retval != 0) + return retval; + + m = get_mqd(*mqd); + + m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | + 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; + + return retval; +} + +static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct vi_mqd *m; + int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0); + + if (retval != 0) + return retval; + + m = get_mqd(mqd); + m->cp_hqd_vmid = q->vmid; + return retval; +} struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_dev *dev) { - pr_warn("amdkfd: VI MQD is not currently supported\n"); - return NULL; + struct mqd_manager *mqd; + + BUG_ON(!dev); + BUG_ON(type >= KFD_MQD_TYPE_MAX); + + pr_debug("kfd: In func 
%s\n", __func__); + + mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL); + if (!mqd) + return NULL; + + mqd->dev = dev; + + switch (type) { + case KFD_MQD_TYPE_CP: + case KFD_MQD_TYPE_COMPUTE: + mqd->init_mqd = init_mqd; + mqd->uninit_mqd = uninit_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd; + mqd->destroy_mqd = destroy_mqd; + mqd->is_occupied = is_occupied; + break; + case KFD_MQD_TYPE_HIQ: + mqd->init_mqd = init_mqd_hiq; + mqd->uninit_mqd = uninit_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd_hiq; + mqd->destroy_mqd = destroy_mqd; + mqd->is_occupied = is_occupied; + break; + case KFD_MQD_TYPE_SDMA: + break; + default: + kfree(mqd); + return NULL; + } + + return mqd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 99b6d28a1..90f391434 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -27,6 +27,7 @@ #include "kfd_kernel_queue.h" #include "kfd_priv.h" #include "kfd_pm4_headers.h" +#include "kfd_pm4_headers_vi.h" #include "kfd_pm4_opcodes.h" static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, @@ -55,6 +56,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, bool *over_subscription) { unsigned int process_count, queue_count; + unsigned int map_queue_size; BUG_ON(!pm || !rlib_size || !over_subscription); @@ -69,9 +71,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm, pr_debug("kfd: over subscribed runlist\n"); } + map_queue_size = + (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ? + sizeof(struct pm4_mes_map_queues) : + sizeof(struct pm4_map_queues); /* calculate run list ib allocation size */ *rlib_size = process_count * sizeof(struct pm4_map_process) + - queue_count * sizeof(struct pm4_map_queues); + queue_count * map_queue_size; /* * Increase the allocation size in case we need a chained run list @@ -176,6 +182,71 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, return 0; } +static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static) +{ + struct pm4_mes_map_queues *packet; + bool use_static = is_static; + + BUG_ON(!pm || !buffer || !q); + + pr_debug("kfd: In func %s\n", __func__); + + packet = (struct pm4_mes_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_map_queues)); + + packet->header.u32all = build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_map_queues)); + packet->bitfields2.alloc_format = + alloc_format__mes_map_queues__one_per_pipe_vi; + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; + + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute_vi; + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_compute_vi; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + if (use_static) + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_latency_static_queue_vi; + break; + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.queue_type = + queue_type__mes_map_queues__debug_interface_queue_vi; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__sdma0_vi; + use_static = false; /* no static queues under SDMA */ + break; + default: + pr_err("kfd: in %s queue type %d\n", __func__, + q->properties.type); + BUG(); + break; + } + packet->bitfields3.doorbell_offset = + 
q->properties.doorbell_off; + + packet->mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer, struct queue *q, bool is_static) { @@ -292,8 +363,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n", kq->queue->queue, qpd->is_debug); - retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr], - kq->queue, qpd->is_debug); + if (pm->dqm->dev->device_info->asic_family == + CHIP_CARRIZO) + retval = pm_create_map_queue_vi(pm, + &rl_buffer[rl_wptr], + kq->queue, + qpd->is_debug); + else + retval = pm_create_map_queue(pm, + &rl_buffer[rl_wptr], + kq->queue, + qpd->is_debug); if (retval != 0) return retval; @@ -309,8 +389,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n", q->queue, qpd->is_debug); - retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr], - q, qpd->is_debug); + if (pm->dqm->dev->device_info->asic_family == + CHIP_CARRIZO) + retval = pm_create_map_queue_vi(pm, + &rl_buffer[rl_wptr], + q, + qpd->is_debug); + else + retval = pm_create_map_queue(pm, + &rl_buffer[rl_wptr], + q, + qpd->is_debug); if (retval != 0) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h new file mode 100644 index 000000000..08c721922 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h @@ -0,0 +1,398 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef F32_MES_PM4_PACKETS_H +#define F32_MES_PM4_PACKETS_H + +#ifndef PM4_MES_HEADER_DEFINED +#define PM4_MES_HEADER_DEFINED +union PM4_MES_TYPE_3_HEADER { + struct { + uint32_t reserved1 : 8; /* < reserved */ + uint32_t opcode : 8; /* < IT opcode */ + uint32_t count : 14;/* < number of DWORDs - 1 in the + information body. */ + uint32_t type : 2; /* < packet identifier. 
+ It should be 3 for type 3 packets */ + }; + uint32_t u32All; +}; +#endif /* PM4_MES_HEADER_DEFINED */ + +/*--------------------MES_SET_RESOURCES--------------------*/ + +#ifndef PM4_MES_SET_RESOURCES_DEFINED +#define PM4_MES_SET_RESOURCES_DEFINED +enum mes_set_resources_queue_type_enum { + queue_type__mes_set_resources__kernel_interface_queue_kiq = 0, + queue_type__mes_set_resources__hsa_interface_queue_hiq = 1, + queue_type__mes_set_resources__hsa_debug_interface_queue = 4 +}; + + +struct pm4_mes_set_resources { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t vmid_mask:16; + uint32_t unmap_latency:8; + uint32_t reserved1:5; + enum mes_set_resources_queue_type_enum queue_type:3; + } bitfields2; + uint32_t ordinal2; + }; + + uint32_t queue_mask_lo; + uint32_t queue_mask_hi; + uint32_t gws_mask_lo; + uint32_t gws_mask_hi; + + union { + struct { + uint32_t oac_mask:16; + uint32_t reserved2:16; + } bitfields7; + uint32_t ordinal7; + }; + + union { + struct { + uint32_t gds_heap_base:6; + uint32_t reserved3:5; + uint32_t gds_heap_size:6; + uint32_t reserved4:15; + } bitfields8; + uint32_t ordinal8; + }; + +}; +#endif + +/*--------------------MES_RUN_LIST--------------------*/ + +#ifndef PM4_MES_RUN_LIST_DEFINED +#define PM4_MES_RUN_LIST_DEFINED + +struct pm4_mes_runlist { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t reserved1:2; + uint32_t ib_base_lo:30; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t ib_base_hi:16; + uint32_t reserved2:16; + } bitfields3; + uint32_t ordinal3; + }; + + union { + struct { + uint32_t ib_size:20; + uint32_t chain:1; + uint32_t offload_polling:1; + uint32_t reserved3:1; + uint32_t valid:1; + uint32_t reserved4:8; + } bitfields4; + uint32_t ordinal4; + }; + +}; +#endif + +/*--------------------MES_MAP_PROCESS--------------------*/ + +#ifndef PM4_MES_MAP_PROCESS_DEFINED +#define PM4_MES_MAP_PROCESS_DEFINED + +struct pm4_mes_map_process { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved1:8; + uint32_t diq_enable:1; + uint32_t process_quantum:7; + } bitfields2; + uint32_t ordinal2; +}; + + union { + struct { + uint32_t page_table_base:28; + uint32_t reserved2:4; + } bitfields3; + uint32_t ordinal3; + }; + + uint32_t sh_mem_bases; + uint32_t sh_mem_ape1_base; + uint32_t sh_mem_ape1_limit; + uint32_t sh_mem_config; + uint32_t gds_addr_lo; + uint32_t gds_addr_hi; + + union { + struct { + uint32_t num_gws:6; + uint32_t reserved3:2; + uint32_t num_oac:4; + uint32_t reserved4:4; + uint32_t gds_size:6; + uint32_t num_queues:10; + } bitfields10; + uint32_t ordinal10; + }; + +}; +#endif + +/*--------------------MES_MAP_QUEUES--------------------*/ + +#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED +#define PM4_MES_MAP_QUEUES_VI_DEFINED +enum mes_map_queues_queue_sel_vi_enum { + queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0, +queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1 +}; + +enum mes_map_queues_queue_type_vi_enum { + queue_type__mes_map_queues__normal_compute_vi = 0, + queue_type__mes_map_queues__debug_interface_queue_vi = 1, + queue_type__mes_map_queues__normal_latency_static_queue_vi = 2, +queue_type__mes_map_queues__low_latency_static_queue_vi = 3 +}; + +enum mes_map_queues_alloc_format_vi_enum { + alloc_format__mes_map_queues__one_per_pipe_vi = 0, 
+alloc_format__mes_map_queues__all_on_one_pipe_vi = 1 +}; + +enum mes_map_queues_engine_sel_vi_enum { + engine_sel__mes_map_queues__compute_vi = 0, + engine_sel__mes_map_queues__sdma0_vi = 2, + engine_sel__mes_map_queues__sdma1_vi = 3 +}; + + +struct pm4_mes_map_queues { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t reserved1:4; + enum mes_map_queues_queue_sel_vi_enum queue_sel:2; + uint32_t reserved2:15; + enum mes_map_queues_queue_type_vi_enum queue_type:3; + enum mes_map_queues_alloc_format_vi_enum alloc_format:2; + enum mes_map_queues_engine_sel_vi_enum engine_sel:3; + uint32_t num_queues:3; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t reserved3:1; + uint32_t check_disable:1; + uint32_t doorbell_offset:21; + uint32_t reserved4:3; + uint32_t queue:6; + } bitfields3; + uint32_t ordinal3; + }; + + uint32_t mqd_addr_lo; + uint32_t mqd_addr_hi; + uint32_t wptr_addr_lo; + uint32_t wptr_addr_hi; +}; +#endif + +/*--------------------MES_QUERY_STATUS--------------------*/ + +#ifndef PM4_MES_QUERY_STATUS_DEFINED +#define PM4_MES_QUERY_STATUS_DEFINED +enum mes_query_status_interrupt_sel_enum { + interrupt_sel__mes_query_status__completion_status = 0, + interrupt_sel__mes_query_status__process_status = 1, + interrupt_sel__mes_query_status__queue_status = 2 +}; + +enum mes_query_status_command_enum { + command__mes_query_status__interrupt_only = 0, + command__mes_query_status__fence_only_immediate = 1, + command__mes_query_status__fence_only_after_write_ack = 2, + command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3 +}; + +enum mes_query_status_engine_sel_enum { + engine_sel__mes_query_status__compute = 0, + engine_sel__mes_query_status__sdma0_queue = 2, + engine_sel__mes_query_status__sdma1_queue = 3 +}; + +struct pm4_mes_query_status { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t context_id:28; + enum mes_query_status_interrupt_sel_enum + interrupt_sel:2; + enum mes_query_status_command_enum command:2; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved1:16; + } bitfields3a; + struct { + uint32_t reserved2:2; + uint32_t doorbell_offset:21; + uint32_t reserved3:2; + enum mes_query_status_engine_sel_enum engine_sel:3; + uint32_t reserved4:4; + } bitfields3b; + uint32_t ordinal3; + }; + + uint32_t addr_lo; + uint32_t addr_hi; + uint32_t data_lo; + uint32_t data_hi; +}; +#endif + +/*--------------------MES_UNMAP_QUEUES--------------------*/ + +#ifndef PM4_MES_UNMAP_QUEUES_DEFINED +#define PM4_MES_UNMAP_QUEUES_DEFINED +enum mes_unmap_queues_action_enum { + action__mes_unmap_queues__preempt_queues = 0, + action__mes_unmap_queues__reset_queues = 1, + action__mes_unmap_queues__disable_process_queues = 2, + action__mes_unmap_queues__reserved = 3 +}; + +enum mes_unmap_queues_queue_sel_enum { + queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0, + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1, + queue_sel__mes_unmap_queues__unmap_all_queues = 2, + queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3 +}; + +enum mes_unmap_queues_engine_sel_enum { + engine_sel__mes_unmap_queues__compute = 0, + engine_sel__mes_unmap_queues__sdma0 = 2, + engine_sel__mes_unmap_queues__sdma1 = 3 +}; + +struct PM4_MES_UNMAP_QUEUES { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + 
struct { + enum mes_unmap_queues_action_enum action:2; + uint32_t reserved1:2; + enum mes_unmap_queues_queue_sel_enum queue_sel:2; + uint32_t reserved2:20; + enum mes_unmap_queues_engine_sel_enum engine_sel:3; + uint32_t num_queues:3; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved3:16; + } bitfields3a; + struct { + uint32_t reserved4:2; + uint32_t doorbell_offset0:21; + uint32_t reserved5:9; + } bitfields3b; + uint32_t ordinal3; + }; + + union { + struct { + uint32_t reserved6:2; + uint32_t doorbell_offset1:21; + uint32_t reserved7:9; + } bitfields4; + uint32_t ordinal4; + }; + + union { + struct { + uint32_t reserved8:2; + uint32_t doorbell_offset2:21; + uint32_t reserved9:9; + } bitfields5; + uint32_t ordinal5; + }; + + union { + struct { + uint32_t reserved10:2; + uint32_t doorbell_offset3:21; + uint32_t reserved11:9; + } bitfields6; + uint32_t ordinal6; + }; +}; +#endif + +#endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c25728bc3..74909e72a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1186,6 +1186,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * TODO: Retrieve max engine clock values from KGD */ + if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) { + dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE; + pr_info("amdkfd: adding doorbell packet type capability\n"); + } + res = 0; err: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 989624b3c..c3ddb9b95 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -40,6 +40,7 @@ #define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 #define HSA_CAP_RESERVED 0xfffff000 +#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000 struct kfd_node_properties { uint32_t cpu_cores_count; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 5bdf1b439..68a8eaa1b 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -23,6 +23,45 @@ #ifndef __AMD_SHARED_H__ #define __AMD_SHARED_H__ +#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ + +/* +* Supported GPU families (aligned with amdgpu_drm.h) +*/ +#define AMD_FAMILY_UNKNOWN 0 +#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */ +#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ +#define AMD_FAMILY_VI 130 /* Iceland, Tonga */ +#define AMD_FAMILY_CZ 135 /* Carrizo */ + +/* + * Supported ASIC types + */ +enum amd_asic_type { + CHIP_BONAIRE = 0, + CHIP_KAVERI, + CHIP_KABINI, + CHIP_HAWAII, + CHIP_MULLINS, + CHIP_TOPAZ, + CHIP_TONGA, + CHIP_FIJI, + CHIP_CARRIZO, + CHIP_LAST, +}; + +/* + * Chip flags + */ +enum amd_chip_flags { + AMD_ASIC_MASK = 0x0000ffffUL, + AMD_FLAGS_MASK = 0xffff0000UL, + AMD_IS_MOBILITY = 0x00010000UL, + AMD_IS_APU = 0x00020000UL, + AMD_IS_PX = 0x00040000UL, + AMD_EXP_HW_SUPPORT = 0x00080000UL, +}; + enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_COMMON, AMD_IP_BLOCK_TYPE_GMC, diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h new file mode 100644 index 000000000..44b1855cb --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -0,0 +1,1246 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef SMU_7_1_3_D_H +#define SMU_7_1_3_D_H + +#define mmGCK_SMC_IND_INDEX 0x80 +#define mmGCK0_GCK_SMC_IND_INDEX 0x80 +#define mmGCK1_GCK_SMC_IND_INDEX 0x82 +#define mmGCK2_GCK_SMC_IND_INDEX 0x84 +#define mmGCK3_GCK_SMC_IND_INDEX 0x86 +#define mmGCK_SMC_IND_DATA 0x81 +#define mmGCK0_GCK_SMC_IND_DATA 0x81 +#define mmGCK1_GCK_SMC_IND_DATA 0x83 +#define mmGCK2_GCK_SMC_IND_DATA 0x85 +#define mmGCK3_GCK_SMC_IND_DATA 0x87 +#define ixGCK_MCLK_FUSES 0xc0500008 +#define ixCG_DCLK_CNTL 0xc050009c +#define ixCG_DCLK_STATUS 0xc05000a0 +#define ixCG_VCLK_CNTL 0xc05000a4 +#define ixCG_VCLK_STATUS 0xc05000a8 +#define ixCG_ECLK_CNTL 0xc05000ac +#define ixCG_ECLK_STATUS 0xc05000b0 +#define ixCG_ACLK_CNTL 0xc05000dc +#define ixCG_MCLK_CNTL 0xc0500120 +#define ixCG_MCLK_STATUS 0xc0500124 +#define ixGCK_DFS_BYPASS_CNTL 0xc0500118 +#define ixCG_SPLL_FUNC_CNTL 0xc0500140 +#define ixCG_SPLL_FUNC_CNTL_2 0xc0500144 +#define ixCG_SPLL_FUNC_CNTL_3 0xc0500148 +#define ixCG_SPLL_FUNC_CNTL_4 0xc050014c +#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 +#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 +#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixSPLL_CNTL_MODE 0xc0500160 +#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 +#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 +#define ixMPLL_BYPASSCLK_SEL 0xc050019c +#define ixCG_CLKPIN_CNTL 0xc05001a0 +#define ixCG_CLKPIN_CNTL_2 0xc05001a4 +#define ixCG_CLKPIN_CNTL_DC 0xc0500204 +#define ixTHM_CLK_CNTL 0xc05001a8 +#define ixMISC_CLK_CTRL 0xc05001ac +#define ixGCK_PLL_TEST_CNTL 0xc05001c0 +#define ixGCK_PLL_TEST_CNTL_2 0xc05001c4 +#define ixGCK_ADFS_CLK_BYPASS_CNTL1 0xc05001c8 +#define mmSMC_IND_INDEX 0x80 +#define mmSMC0_SMC_IND_INDEX 0x80 +#define mmSMC1_SMC_IND_INDEX 0x82 +#define mmSMC2_SMC_IND_INDEX 0x84 +#define mmSMC3_SMC_IND_INDEX 0x86 +#define mmSMC_IND_DATA 0x81 +#define mmSMC0_SMC_IND_DATA 0x81 +#define mmSMC1_SMC_IND_DATA 0x83 +#define mmSMC2_SMC_IND_DATA 0x85 +#define mmSMC3_SMC_IND_DATA 0x87 +#define mmSMC_IND_INDEX_0 0x80 +#define mmSMC_IND_DATA_0 0x81 +#define mmSMC_IND_INDEX_1 0x82 +#define mmSMC_IND_DATA_1 0x83 +#define mmSMC_IND_INDEX_2 0x84 +#define mmSMC_IND_DATA_2 0x85 +#define mmSMC_IND_INDEX_3 0x86 +#define mmSMC_IND_DATA_3 0x87 +#define mmSMC_IND_INDEX_4 0x88 +#define mmSMC_IND_DATA_4 0x89 +#define mmSMC_IND_INDEX_5 0x8a +#define mmSMC_IND_DATA_5 0x8b +#define mmSMC_IND_INDEX_6 0x8c +#define mmSMC_IND_DATA_6 0x8d +#define mmSMC_IND_INDEX_7 0x8e +#define mmSMC_IND_DATA_7 0x8f +#define 
mmSMC_IND_ACCESS_CNTL 0x92 +#define mmSMC_MESSAGE_0 0x94 +#define mmSMC_RESP_0 0x95 +#define mmSMC_MESSAGE_1 0x96 +#define mmSMC_RESP_1 0x97 +#define mmSMC_MESSAGE_2 0x98 +#define mmSMC_RESP_2 0x99 +#define mmSMC_MESSAGE_3 0x9a +#define mmSMC_RESP_3 0x9b +#define mmSMC_MESSAGE_4 0x9c +#define mmSMC_RESP_4 0x9d +#define mmSMC_MESSAGE_5 0x9e +#define mmSMC_RESP_5 0x9f +#define mmSMC_MESSAGE_6 0xa0 +#define mmSMC_RESP_6 0xa1 +#define mmSMC_MESSAGE_7 0xa2 +#define mmSMC_RESP_7 0xa3 +#define mmSMC_MSG_ARG_0 0xa4 +#define mmSMC_MSG_ARG_1 0xa5 +#define mmSMC_MSG_ARG_2 0xa6 +#define mmSMC_MSG_ARG_3 0xa7 +#define mmSMC_MSG_ARG_4 0xa8 +#define mmSMC_MSG_ARG_5 0xa9 +#define mmSMC_MSG_ARG_6 0xaa +#define mmSMC_MSG_ARG_7 0xab +#define mmSMC_MESSAGE_8 0xb5 +#define mmSMC_RESP_8 0xb6 +#define mmSMC_MESSAGE_9 0xb7 +#define mmSMC_RESP_9 0xb8 +#define mmSMC_MESSAGE_10 0xb9 +#define mmSMC_RESP_10 0xba +#define mmSMC_MESSAGE_11 0xbb +#define mmSMC_RESP_11 0xbc +#define mmSMC_MSG_ARG_8 0xbd +#define mmSMC_MSG_ARG_9 0xbe +#define mmSMC_MSG_ARG_10 0xbf +#define mmSMC_MSG_ARG_11 0x93 +#define ixSMC_SYSCON_RESET_CNTL 0x80000000 +#define ixSMC_SYSCON_CLOCK_CNTL_0 0x80000004 +#define ixSMC_SYSCON_CLOCK_CNTL_1 0x80000008 +#define ixSMC_SYSCON_CLOCK_CNTL_2 0x8000000c +#define ixSMC_SYSCON_MISC_CNTL 0x80000010 +#define ixSMC_SYSCON_MSG_ARG_0 0x80000068 +#define ixSMC_PC_C 0x80000370 +#define ixSMC_SCRATCH9 0x80000424 +#define mmGPIOPAD_SW_INT_STAT 0x180 +#define mmGPIOPAD_STRENGTH 0x181 +#define mmGPIOPAD_MASK 0x182 +#define mmGPIOPAD_A 0x183 +#define mmGPIOPAD_EN 0x184 +#define mmGPIOPAD_Y 0x185 +#define mmGPIOPAD_PINSTRAPS 0x186 +#define mmGPIOPAD_INT_STAT_EN 0x187 +#define mmGPIOPAD_INT_STAT 0x188 +#define mmGPIOPAD_INT_STAT_AK 0x189 +#define mmGPIOPAD_INT_EN 0x18a +#define mmGPIOPAD_INT_TYPE 0x18b +#define mmGPIOPAD_INT_POLARITY 0x18c +#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x18d +#define mmGPIOPAD_RCVR_SEL 0x191 +#define mmGPIOPAD_PU_EN 0x192 +#define mmGPIOPAD_PD_EN 0x193 +#define mmCG_FPS_CNT 0x1b6 +#define mmSMU_IND_INDEX_0 0x1a6 +#define mmSMU_IND_DATA_0 0x1a7 +#define mmSMU_IND_INDEX_1 0x1a8 +#define mmSMU_IND_DATA_1 0x1a9 +#define mmSMU_IND_INDEX_2 0x1aa +#define mmSMU_IND_DATA_2 0x1ab +#define mmSMU_IND_INDEX_3 0x1ac +#define mmSMU_IND_DATA_3 0x1ad +#define mmSMU_IND_INDEX_4 0x1ae +#define mmSMU_IND_DATA_4 0x1af +#define mmSMU_IND_INDEX_5 0x1b0 +#define mmSMU_IND_DATA_5 0x1b1 +#define mmSMU_IND_INDEX_6 0x1b2 +#define mmSMU_IND_DATA_6 0x1b3 +#define mmSMU_IND_INDEX_7 0x1b4 +#define mmSMU_IND_DATA_7 0x1b5 +#define mmSMU_SMC_IND_INDEX 0x80 +#define mmSMU0_SMU_SMC_IND_INDEX 0x80 +#define mmSMU1_SMU_SMC_IND_INDEX 0x82 +#define mmSMU2_SMU_SMC_IND_INDEX 0x84 +#define mmSMU3_SMU_SMC_IND_INDEX 0x86 +#define mmSMU_SMC_IND_DATA 0x81 +#define mmSMU0_SMU_SMC_IND_DATA 0x81 +#define mmSMU1_SMU_SMC_IND_DATA 0x83 +#define mmSMU2_SMU_SMC_IND_DATA 0x85 +#define mmSMU3_SMU_SMC_IND_DATA 0x87 +#define ixRCU_UC_EVENTS 0xc0000004 +#define ixRCU_MISC_CTRL 0xc0000010 +#define ixRCU_VIRT_RESET_REQ 0xc0000024 +#define ixCC_RCU_FUSES 0xc00c0000 +#define ixCC_SMU_MISC_FUSES 0xc00c0004 +#define ixCC_SCLK_VID_FUSES 0xc00c0008 +#define ixCC_GIO_IOCCFG_FUSES 0xc00c000c +#define ixCC_GIO_IOC_FUSES 0xc00c0010 +#define ixCC_SMU_TST_EFUSE1_MISC 0xc00c001c +#define ixCC_TST_ID_STRAPS 0xc00c0020 +#define ixCC_FCTRL_FUSES 0xc00c0024 +#define ixCC_HARVEST_FUSES 0xc00c0028 +#define ixSMU_MAIN_PLL_OP_FREQ 0xe0003020 +#define ixSMU_STATUS 0xe0003088 +#define ixSMU_FIRMWARE 0xe00030a4 +#define ixSMU_INPUT_DATA 0xe00030b8 +#define ixSMU_EFUSE_0 0xc0100000 
+#define ixFIRMWARE_FLAGS 0x3f000 +#define ixTDC_STATUS 0x3f004 +#define ixTDC_MV_AVERAGE 0x3f008 +#define ixTDC_VRM_LIMIT 0x3f00c +#define ixFEATURE_STATUS 0x3f010 +#define ixENTITY_TEMPERATURES_1 0x3f014 +#define ixMCARB_DRAM_TIMING_TABLE_1 0x3f018 +#define ixMCARB_DRAM_TIMING_TABLE_2 0x3f01c +#define ixMCARB_DRAM_TIMING_TABLE_3 0x3f020 +#define ixMCARB_DRAM_TIMING_TABLE_4 0x3f024 +#define ixMCARB_DRAM_TIMING_TABLE_5 0x3f028 +#define ixMCARB_DRAM_TIMING_TABLE_6 0x3f02c +#define ixMCARB_DRAM_TIMING_TABLE_7 0x3f030 +#define ixMCARB_DRAM_TIMING_TABLE_8 0x3f034 +#define ixMCARB_DRAM_TIMING_TABLE_9 0x3f038 +#define ixMCARB_DRAM_TIMING_TABLE_10 0x3f03c +#define ixMCARB_DRAM_TIMING_TABLE_11 0x3f040 +#define ixMCARB_DRAM_TIMING_TABLE_12 0x3f044 +#define ixMCARB_DRAM_TIMING_TABLE_13 0x3f048 +#define ixMCARB_DRAM_TIMING_TABLE_14 0x3f04c +#define ixMCARB_DRAM_TIMING_TABLE_15 0x3f050 +#define ixMCARB_DRAM_TIMING_TABLE_16 0x3f054 +#define ixMCARB_DRAM_TIMING_TABLE_17 0x3f058 +#define ixMCARB_DRAM_TIMING_TABLE_18 0x3f05c +#define ixMCARB_DRAM_TIMING_TABLE_19 0x3f060 +#define ixMCARB_DRAM_TIMING_TABLE_20 0x3f064 +#define ixMCARB_DRAM_TIMING_TABLE_21 0x3f068 +#define ixMCARB_DRAM_TIMING_TABLE_22 0x3f06c +#define ixMCARB_DRAM_TIMING_TABLE_23 0x3f070 +#define ixMCARB_DRAM_TIMING_TABLE_24 0x3f074 +#define ixMCARB_DRAM_TIMING_TABLE_25 0x3f078 +#define ixMCARB_DRAM_TIMING_TABLE_26 0x3f07c +#define ixMCARB_DRAM_TIMING_TABLE_27 0x3f080 +#define ixMCARB_DRAM_TIMING_TABLE_28 0x3f084 +#define ixMCARB_DRAM_TIMING_TABLE_29 0x3f088 +#define ixMCARB_DRAM_TIMING_TABLE_30 0x3f08c +#define ixMCARB_DRAM_TIMING_TABLE_31 0x3f090 +#define ixMCARB_DRAM_TIMING_TABLE_32 0x3f094 +#define ixMCARB_DRAM_TIMING_TABLE_33 0x3f098 +#define ixMCARB_DRAM_TIMING_TABLE_34 0x3f09c +#define ixMCARB_DRAM_TIMING_TABLE_35 0x3f0a0 +#define ixMCARB_DRAM_TIMING_TABLE_36 0x3f0a4 +#define ixMCARB_DRAM_TIMING_TABLE_37 0x3f0a8 +#define ixMCARB_DRAM_TIMING_TABLE_38 0x3f0ac +#define ixMCARB_DRAM_TIMING_TABLE_39 0x3f0b0 +#define ixMCARB_DRAM_TIMING_TABLE_40 0x3f0b4 +#define ixMCARB_DRAM_TIMING_TABLE_41 0x3f0b8 +#define ixMCARB_DRAM_TIMING_TABLE_42 0x3f0bc +#define ixMCARB_DRAM_TIMING_TABLE_43 0x3f0c0 +#define ixMCARB_DRAM_TIMING_TABLE_44 0x3f0c4 +#define ixMCARB_DRAM_TIMING_TABLE_45 0x3f0c8 +#define ixMCARB_DRAM_TIMING_TABLE_46 0x3f0cc +#define ixMCARB_DRAM_TIMING_TABLE_47 0x3f0d0 +#define ixMCARB_DRAM_TIMING_TABLE_48 0x3f0d4 +#define ixMCARB_DRAM_TIMING_TABLE_49 0x3f0d8 +#define ixMCARB_DRAM_TIMING_TABLE_50 0x3f0dc +#define ixMCARB_DRAM_TIMING_TABLE_51 0x3f0e0 +#define ixMCARB_DRAM_TIMING_TABLE_52 0x3f0e4 +#define ixMCARB_DRAM_TIMING_TABLE_53 0x3f0e8 +#define ixMCARB_DRAM_TIMING_TABLE_54 0x3f0ec +#define ixMCARB_DRAM_TIMING_TABLE_55 0x3f0f0 +#define ixMCARB_DRAM_TIMING_TABLE_56 0x3f0f4 +#define ixMCARB_DRAM_TIMING_TABLE_57 0x3f0f8 +#define ixMCARB_DRAM_TIMING_TABLE_58 0x3f0fc +#define ixMCARB_DRAM_TIMING_TABLE_59 0x3f100 +#define ixMCARB_DRAM_TIMING_TABLE_60 0x3f104 +#define ixMCARB_DRAM_TIMING_TABLE_61 0x3f108 +#define ixMCARB_DRAM_TIMING_TABLE_62 0x3f10c +#define ixMCARB_DRAM_TIMING_TABLE_63 0x3f110 +#define ixMCARB_DRAM_TIMING_TABLE_64 0x3f114 +#define ixMCARB_DRAM_TIMING_TABLE_65 0x3f118 +#define ixMCARB_DRAM_TIMING_TABLE_66 0x3f11c +#define ixMCARB_DRAM_TIMING_TABLE_67 0x3f120 +#define ixMCARB_DRAM_TIMING_TABLE_68 0x3f124 +#define ixMCARB_DRAM_TIMING_TABLE_69 0x3f128 +#define ixMCARB_DRAM_TIMING_TABLE_70 0x3f12c +#define ixMCARB_DRAM_TIMING_TABLE_71 0x3f130 +#define ixMCARB_DRAM_TIMING_TABLE_72 0x3f134 +#define ixMCARB_DRAM_TIMING_TABLE_73 0x3f138 
+#define ixMCARB_DRAM_TIMING_TABLE_74 0x3f13c +#define ixMCARB_DRAM_TIMING_TABLE_75 0x3f140 +#define ixMCARB_DRAM_TIMING_TABLE_76 0x3f144 +#define ixMCARB_DRAM_TIMING_TABLE_77 0x3f148 +#define ixMCARB_DRAM_TIMING_TABLE_78 0x3f14c +#define ixMCARB_DRAM_TIMING_TABLE_79 0x3f150 +#define ixMCARB_DRAM_TIMING_TABLE_80 0x3f154 +#define ixMCARB_DRAM_TIMING_TABLE_81 0x3f158 +#define ixMCARB_DRAM_TIMING_TABLE_82 0x3f15c +#define ixMCARB_DRAM_TIMING_TABLE_83 0x3f160 +#define ixMCARB_DRAM_TIMING_TABLE_84 0x3f164 +#define ixMCARB_DRAM_TIMING_TABLE_85 0x3f168 +#define ixMCARB_DRAM_TIMING_TABLE_86 0x3f16c +#define ixMCARB_DRAM_TIMING_TABLE_87 0x3f170 +#define ixMCARB_DRAM_TIMING_TABLE_88 0x3f174 +#define ixMCARB_DRAM_TIMING_TABLE_89 0x3f178 +#define ixMCARB_DRAM_TIMING_TABLE_90 0x3f17c +#define ixMCARB_DRAM_TIMING_TABLE_91 0x3f180 +#define ixMCARB_DRAM_TIMING_TABLE_92 0x3f184 +#define ixMCARB_DRAM_TIMING_TABLE_93 0x3f188 +#define ixMCARB_DRAM_TIMING_TABLE_94 0x3f18c +#define ixMCARB_DRAM_TIMING_TABLE_95 0x3f190 +#define ixMCARB_DRAM_TIMING_TABLE_96 0x3f194 +#define ixDPM_TABLE_1 0x3f198 +#define ixDPM_TABLE_2 0x3f19c +#define ixDPM_TABLE_3 0x3f1a0 +#define ixDPM_TABLE_4 0x3f1a4 +#define ixDPM_TABLE_5 0x3f1a8 +#define ixDPM_TABLE_6 0x3f1ac +#define ixDPM_TABLE_7 0x3f1b0 +#define ixDPM_TABLE_8 0x3f1b4 +#define ixDPM_TABLE_9 0x3f1b8 +#define ixDPM_TABLE_10 0x3f1bc +#define ixDPM_TABLE_11 0x3f1c0 +#define ixDPM_TABLE_12 0x3f1c4 +#define ixDPM_TABLE_13 0x3f1c8 +#define ixDPM_TABLE_14 0x3f1cc +#define ixDPM_TABLE_15 0x3f1d0 +#define ixDPM_TABLE_16 0x3f1d4 +#define ixDPM_TABLE_17 0x3f1d8 +#define ixDPM_TABLE_18 0x3f1dc +#define ixDPM_TABLE_19 0x3f1e0 +#define ixDPM_TABLE_20 0x3f1e4 +#define ixDPM_TABLE_21 0x3f1e8 +#define ixDPM_TABLE_22 0x3f1ec +#define ixDPM_TABLE_23 0x3f1f0 +#define ixDPM_TABLE_24 0x3f1f4 +#define ixDPM_TABLE_25 0x3f1f8 +#define ixDPM_TABLE_26 0x3f1fc +#define ixDPM_TABLE_27 0x3f200 +#define ixDPM_TABLE_28 0x3f204 +#define ixDPM_TABLE_29 0x3f208 +#define ixDPM_TABLE_30 0x3f20c +#define ixDPM_TABLE_31 0x3f210 +#define ixDPM_TABLE_32 0x3f214 +#define ixDPM_TABLE_33 0x3f218 +#define ixDPM_TABLE_34 0x3f21c +#define ixDPM_TABLE_35 0x3f220 +#define ixDPM_TABLE_36 0x3f224 +#define ixDPM_TABLE_37 0x3f228 +#define ixDPM_TABLE_38 0x3f22c +#define ixDPM_TABLE_39 0x3f230 +#define ixDPM_TABLE_40 0x3f234 +#define ixDPM_TABLE_41 0x3f238 +#define ixDPM_TABLE_42 0x3f23c +#define ixDPM_TABLE_43 0x3f240 +#define ixDPM_TABLE_44 0x3f244 +#define ixDPM_TABLE_45 0x3f248 +#define ixDPM_TABLE_46 0x3f24c +#define ixDPM_TABLE_47 0x3f250 +#define ixDPM_TABLE_48 0x3f254 +#define ixDPM_TABLE_49 0x3f258 +#define ixDPM_TABLE_50 0x3f25c +#define ixDPM_TABLE_51 0x3f260 +#define ixDPM_TABLE_52 0x3f264 +#define ixDPM_TABLE_53 0x3f268 +#define ixDPM_TABLE_54 0x3f26c +#define ixDPM_TABLE_55 0x3f270 +#define ixDPM_TABLE_56 0x3f274 +#define ixDPM_TABLE_57 0x3f278 +#define ixDPM_TABLE_58 0x3f27c +#define ixDPM_TABLE_59 0x3f280 +#define ixDPM_TABLE_60 0x3f284 +#define ixDPM_TABLE_61 0x3f288 +#define ixDPM_TABLE_62 0x3f28c +#define ixDPM_TABLE_63 0x3f290 +#define ixDPM_TABLE_64 0x3f294 +#define ixDPM_TABLE_65 0x3f298 +#define ixDPM_TABLE_66 0x3f29c +#define ixDPM_TABLE_67 0x3f2a0 +#define ixDPM_TABLE_68 0x3f2a4 +#define ixDPM_TABLE_69 0x3f2a8 +#define ixDPM_TABLE_70 0x3f2ac +#define ixDPM_TABLE_71 0x3f2b0 +#define ixDPM_TABLE_72 0x3f2b4 +#define ixDPM_TABLE_73 0x3f2b8 +#define ixDPM_TABLE_74 0x3f2bc +#define ixDPM_TABLE_75 0x3f2c0 +#define ixDPM_TABLE_76 0x3f2c4 +#define ixDPM_TABLE_77 0x3f2c8 +#define ixDPM_TABLE_78 0x3f2cc +#define 
ixDPM_TABLE_79 0x3f2d0 +#define ixDPM_TABLE_80 0x3f2d4 +#define ixDPM_TABLE_81 0x3f2d8 +#define ixDPM_TABLE_82 0x3f2dc +#define ixDPM_TABLE_83 0x3f2e0 +#define ixDPM_TABLE_84 0x3f2e4 +#define ixDPM_TABLE_85 0x3f2e8 +#define ixDPM_TABLE_86 0x3f2ec +#define ixDPM_TABLE_87 0x3f2f0 +#define ixDPM_TABLE_88 0x3f2f4 +#define ixDPM_TABLE_89 0x3f2f8 +#define ixDPM_TABLE_90 0x3f2fc +#define ixDPM_TABLE_91 0x3f300 +#define ixDPM_TABLE_92 0x3f304 +#define ixDPM_TABLE_93 0x3f308 +#define ixDPM_TABLE_94 0x3f30c +#define ixDPM_TABLE_95 0x3f310 +#define ixDPM_TABLE_96 0x3f314 +#define ixDPM_TABLE_97 0x3f318 +#define ixDPM_TABLE_98 0x3f31c +#define ixDPM_TABLE_99 0x3f320 +#define ixDPM_TABLE_100 0x3f324 +#define ixDPM_TABLE_101 0x3f328 +#define ixDPM_TABLE_102 0x3f32c +#define ixDPM_TABLE_103 0x3f330 +#define ixDPM_TABLE_104 0x3f334 +#define ixDPM_TABLE_105 0x3f338 +#define ixDPM_TABLE_106 0x3f33c +#define ixDPM_TABLE_107 0x3f340 +#define ixDPM_TABLE_108 0x3f344 +#define ixDPM_TABLE_109 0x3f348 +#define ixDPM_TABLE_110 0x3f34c +#define ixDPM_TABLE_111 0x3f350 +#define ixDPM_TABLE_112 0x3f354 +#define ixDPM_TABLE_113 0x3f358 +#define ixDPM_TABLE_114 0x3f35c +#define ixDPM_TABLE_115 0x3f360 +#define ixDPM_TABLE_116 0x3f364 +#define ixDPM_TABLE_117 0x3f368 +#define ixDPM_TABLE_118 0x3f36c +#define ixDPM_TABLE_119 0x3f370 +#define ixDPM_TABLE_120 0x3f374 +#define ixDPM_TABLE_121 0x3f378 +#define ixDPM_TABLE_122 0x3f37c +#define ixDPM_TABLE_123 0x3f380 +#define ixDPM_TABLE_124 0x3f384 +#define ixDPM_TABLE_125 0x3f388 +#define ixDPM_TABLE_126 0x3f38c +#define ixDPM_TABLE_127 0x3f390 +#define ixDPM_TABLE_128 0x3f394 +#define ixDPM_TABLE_129 0x3f398 +#define ixDPM_TABLE_130 0x3f39c +#define ixDPM_TABLE_131 0x3f3a0 +#define ixDPM_TABLE_132 0x3f3a4 +#define ixDPM_TABLE_133 0x3f3a8 +#define ixDPM_TABLE_134 0x3f3ac +#define ixDPM_TABLE_135 0x3f3b0 +#define ixDPM_TABLE_136 0x3f3b4 +#define ixDPM_TABLE_137 0x3f3b8 +#define ixDPM_TABLE_138 0x3f3bc +#define ixDPM_TABLE_139 0x3f3c0 +#define ixDPM_TABLE_140 0x3f3c4 +#define ixDPM_TABLE_141 0x3f3c8 +#define ixDPM_TABLE_142 0x3f3cc +#define ixDPM_TABLE_143 0x3f3d0 +#define ixDPM_TABLE_144 0x3f3d4 +#define ixDPM_TABLE_145 0x3f3d8 +#define ixDPM_TABLE_146 0x3f3dc +#define ixDPM_TABLE_147 0x3f3e0 +#define ixDPM_TABLE_148 0x3f3e4 +#define ixDPM_TABLE_149 0x3f3e8 +#define ixDPM_TABLE_150 0x3f3ec +#define ixDPM_TABLE_151 0x3f3f0 +#define ixDPM_TABLE_152 0x3f3f4 +#define ixDPM_TABLE_153 0x3f3f8 +#define ixDPM_TABLE_154 0x3f3fc +#define ixDPM_TABLE_155 0x3f400 +#define ixDPM_TABLE_156 0x3f404 +#define ixDPM_TABLE_157 0x3f408 +#define ixDPM_TABLE_158 0x3f40c +#define ixDPM_TABLE_159 0x3f410 +#define ixDPM_TABLE_160 0x3f414 +#define ixDPM_TABLE_161 0x3f418 +#define ixDPM_TABLE_162 0x3f41c +#define ixDPM_TABLE_163 0x3f420 +#define ixDPM_TABLE_164 0x3f424 +#define ixDPM_TABLE_165 0x3f428 +#define ixDPM_TABLE_166 0x3f42c +#define ixDPM_TABLE_167 0x3f430 +#define ixDPM_TABLE_168 0x3f434 +#define ixDPM_TABLE_169 0x3f438 +#define ixDPM_TABLE_170 0x3f43c +#define ixDPM_TABLE_171 0x3f440 +#define ixDPM_TABLE_172 0x3f444 +#define ixDPM_TABLE_173 0x3f448 +#define ixDPM_TABLE_174 0x3f44c +#define ixDPM_TABLE_175 0x3f450 +#define ixDPM_TABLE_176 0x3f454 +#define ixDPM_TABLE_177 0x3f458 +#define ixDPM_TABLE_178 0x3f45c +#define ixDPM_TABLE_179 0x3f460 +#define ixDPM_TABLE_180 0x3f464 +#define ixDPM_TABLE_181 0x3f468 +#define ixDPM_TABLE_182 0x3f46c +#define ixDPM_TABLE_183 0x3f470 +#define ixDPM_TABLE_184 0x3f474 +#define ixDPM_TABLE_185 0x3f478 +#define ixDPM_TABLE_186 0x3f47c +#define 
ixDPM_TABLE_187 0x3f480 +#define ixDPM_TABLE_188 0x3f484 +#define ixDPM_TABLE_189 0x3f488 +#define ixDPM_TABLE_190 0x3f48c +#define ixDPM_TABLE_191 0x3f490 +#define ixDPM_TABLE_192 0x3f494 +#define ixDPM_TABLE_193 0x3f498 +#define ixDPM_TABLE_194 0x3f49c +#define ixDPM_TABLE_195 0x3f4a0 +#define ixDPM_TABLE_196 0x3f4a4 +#define ixDPM_TABLE_197 0x3f4a8 +#define ixDPM_TABLE_198 0x3f4ac +#define ixDPM_TABLE_199 0x3f4b0 +#define ixDPM_TABLE_200 0x3f4b4 +#define ixDPM_TABLE_201 0x3f4b8 +#define ixDPM_TABLE_202 0x3f4bc +#define ixDPM_TABLE_203 0x3f4c0 +#define ixDPM_TABLE_204 0x3f4c4 +#define ixDPM_TABLE_205 0x3f4c8 +#define ixDPM_TABLE_206 0x3f4cc +#define ixDPM_TABLE_207 0x3f4d0 +#define ixDPM_TABLE_208 0x3f4d4 +#define ixDPM_TABLE_209 0x3f4d8 +#define ixDPM_TABLE_210 0x3f4dc +#define ixDPM_TABLE_211 0x3f4e0 +#define ixDPM_TABLE_212 0x3f4e4 +#define ixDPM_TABLE_213 0x3f4e8 +#define ixDPM_TABLE_214 0x3f4ec +#define ixDPM_TABLE_215 0x3f4f0 +#define ixDPM_TABLE_216 0x3f4f4 +#define ixDPM_TABLE_217 0x3f4f8 +#define ixDPM_TABLE_218 0x3f4fc +#define ixDPM_TABLE_219 0x3f500 +#define ixDPM_TABLE_220 0x3f504 +#define ixDPM_TABLE_221 0x3f508 +#define ixDPM_TABLE_222 0x3f50c +#define ixDPM_TABLE_223 0x3f510 +#define ixDPM_TABLE_224 0x3f514 +#define ixDPM_TABLE_225 0x3f518 +#define ixDPM_TABLE_226 0x3f51c +#define ixDPM_TABLE_227 0x3f520 +#define ixDPM_TABLE_228 0x3f524 +#define ixDPM_TABLE_229 0x3f528 +#define ixDPM_TABLE_230 0x3f52c +#define ixDPM_TABLE_231 0x3f530 +#define ixDPM_TABLE_232 0x3f534 +#define ixDPM_TABLE_233 0x3f538 +#define ixDPM_TABLE_234 0x3f53c +#define ixDPM_TABLE_235 0x3f540 +#define ixDPM_TABLE_236 0x3f544 +#define ixDPM_TABLE_237 0x3f548 +#define ixDPM_TABLE_238 0x3f54c +#define ixDPM_TABLE_239 0x3f550 +#define ixDPM_TABLE_240 0x3f554 +#define ixDPM_TABLE_241 0x3f558 +#define ixDPM_TABLE_242 0x3f55c +#define ixDPM_TABLE_243 0x3f560 +#define ixDPM_TABLE_244 0x3f564 +#define ixDPM_TABLE_245 0x3f568 +#define ixDPM_TABLE_246 0x3f56c +#define ixDPM_TABLE_247 0x3f570 +#define ixDPM_TABLE_248 0x3f574 +#define ixDPM_TABLE_249 0x3f578 +#define ixDPM_TABLE_250 0x3f57c +#define ixDPM_TABLE_251 0x3f580 +#define ixDPM_TABLE_252 0x3f584 +#define ixDPM_TABLE_253 0x3f588 +#define ixDPM_TABLE_254 0x3f58c +#define ixDPM_TABLE_255 0x3f590 +#define ixDPM_TABLE_256 0x3f594 +#define ixDPM_TABLE_257 0x3f598 +#define ixDPM_TABLE_258 0x3f59c +#define ixDPM_TABLE_259 0x3f5a0 +#define ixDPM_TABLE_260 0x3f5a4 +#define ixDPM_TABLE_261 0x3f5a8 +#define ixDPM_TABLE_262 0x3f5ac +#define ixDPM_TABLE_263 0x3f5b0 +#define ixDPM_TABLE_264 0x3f5b4 +#define ixDPM_TABLE_265 0x3f5b8 +#define ixDPM_TABLE_266 0x3f5bc +#define ixDPM_TABLE_267 0x3f5c0 +#define ixDPM_TABLE_268 0x3f5c4 +#define ixDPM_TABLE_269 0x3f5c8 +#define ixDPM_TABLE_270 0x3f5cc +#define ixDPM_TABLE_271 0x3f5d0 +#define ixDPM_TABLE_272 0x3f5d4 +#define ixDPM_TABLE_273 0x3f5d8 +#define ixDPM_TABLE_274 0x3f5dc +#define ixDPM_TABLE_275 0x3f5e0 +#define ixDPM_TABLE_276 0x3f5e4 +#define ixDPM_TABLE_277 0x3f5e8 +#define ixDPM_TABLE_278 0x3f5ec +#define ixDPM_TABLE_279 0x3f5f0 +#define ixDPM_TABLE_280 0x3f5f4 +#define ixDPM_TABLE_281 0x3f5f8 +#define ixDPM_TABLE_282 0x3f5fc +#define ixDPM_TABLE_283 0x3f600 +#define ixDPM_TABLE_284 0x3f604 +#define ixDPM_TABLE_285 0x3f608 +#define ixDPM_TABLE_286 0x3f60c +#define ixDPM_TABLE_287 0x3f610 +#define ixDPM_TABLE_288 0x3f614 +#define ixDPM_TABLE_289 0x3f618 +#define ixDPM_TABLE_290 0x3f61c +#define ixDPM_TABLE_291 0x3f620 +#define ixDPM_TABLE_292 0x3f624 +#define ixDPM_TABLE_293 0x3f628 +#define ixDPM_TABLE_294 0x3f62c 
+#define ixDPM_TABLE_295 0x3f630 +#define ixDPM_TABLE_296 0x3f634 +#define ixDPM_TABLE_297 0x3f638 +#define ixDPM_TABLE_298 0x3f63c +#define ixDPM_TABLE_299 0x3f640 +#define ixDPM_TABLE_300 0x3f644 +#define ixDPM_TABLE_301 0x3f648 +#define ixDPM_TABLE_302 0x3f64c +#define ixDPM_TABLE_303 0x3f650 +#define ixDPM_TABLE_304 0x3f654 +#define ixDPM_TABLE_305 0x3f658 +#define ixDPM_TABLE_306 0x3f65c +#define ixDPM_TABLE_307 0x3f660 +#define ixDPM_TABLE_308 0x3f664 +#define ixDPM_TABLE_309 0x3f668 +#define ixDPM_TABLE_310 0x3f66c +#define ixDPM_TABLE_311 0x3f670 +#define ixDPM_TABLE_312 0x3f674 +#define ixDPM_TABLE_313 0x3f678 +#define ixDPM_TABLE_314 0x3f67c +#define ixDPM_TABLE_315 0x3f680 +#define ixDPM_TABLE_316 0x3f684 +#define ixDPM_TABLE_317 0x3f688 +#define ixDPM_TABLE_318 0x3f68c +#define ixDPM_TABLE_319 0x3f690 +#define ixDPM_TABLE_320 0x3f694 +#define ixDPM_TABLE_321 0x3f698 +#define ixDPM_TABLE_322 0x3f69c +#define ixDPM_TABLE_323 0x3f6a0 +#define ixDPM_TABLE_324 0x3f6a4 +#define ixDPM_TABLE_325 0x3f6a8 +#define ixDPM_TABLE_326 0x3f6ac +#define ixDPM_TABLE_327 0x3f6b0 +#define ixDPM_TABLE_328 0x3f6b4 +#define ixDPM_TABLE_329 0x3f6b8 +#define ixDPM_TABLE_330 0x3f6bc +#define ixDPM_TABLE_331 0x3f6c0 +#define ixDPM_TABLE_332 0x3f6c4 +#define ixDPM_TABLE_333 0x3f6c8 +#define ixDPM_TABLE_334 0x3f6cc +#define ixDPM_TABLE_335 0x3f6d0 +#define ixDPM_TABLE_336 0x3f6d4 +#define ixDPM_TABLE_337 0x3f6d8 +#define ixDPM_TABLE_338 0x3f6dc +#define ixDPM_TABLE_339 0x3f6e0 +#define ixDPM_TABLE_340 0x3f6e4 +#define ixDPM_TABLE_341 0x3f6e8 +#define ixDPM_TABLE_342 0x3f6ec +#define ixDPM_TABLE_343 0x3f6f0 +#define ixDPM_TABLE_344 0x3f6f4 +#define ixDPM_TABLE_345 0x3f6f8 +#define ixDPM_TABLE_346 0x3f6fc +#define ixDPM_TABLE_347 0x3f700 +#define ixDPM_TABLE_348 0x3f704 +#define ixDPM_TABLE_349 0x3f708 +#define ixDPM_TABLE_350 0x3f70c +#define ixDPM_TABLE_351 0x3f710 +#define ixDPM_TABLE_352 0x3f714 +#define ixDPM_TABLE_353 0x3f718 +#define ixDPM_TABLE_354 0x3f71c +#define ixDPM_TABLE_355 0x3f720 +#define ixDPM_TABLE_356 0x3f724 +#define ixDPM_TABLE_357 0x3f728 +#define ixDPM_TABLE_358 0x3f72c +#define ixDPM_TABLE_359 0x3f730 +#define ixDPM_TABLE_360 0x3f734 +#define ixDPM_TABLE_361 0x3f738 +#define ixDPM_TABLE_362 0x3f73c +#define ixDPM_TABLE_363 0x3f740 +#define ixDPM_TABLE_364 0x3f744 +#define ixDPM_TABLE_365 0x3f748 +#define ixDPM_TABLE_366 0x3f74c +#define ixDPM_TABLE_367 0x3f750 +#define ixDPM_TABLE_368 0x3f754 +#define ixDPM_TABLE_369 0x3f758 +#define ixDPM_TABLE_370 0x3f75c +#define ixDPM_TABLE_371 0x3f760 +#define ixDPM_TABLE_372 0x3f764 +#define ixDPM_TABLE_373 0x3f768 +#define ixDPM_TABLE_374 0x3f76c +#define ixDPM_TABLE_375 0x3f770 +#define ixDPM_TABLE_376 0x3f774 +#define ixDPM_TABLE_377 0x3f778 +#define ixDPM_TABLE_378 0x3f77c +#define ixDPM_TABLE_379 0x3f780 +#define ixDPM_TABLE_380 0x3f784 +#define ixDPM_TABLE_381 0x3f788 +#define ixDPM_TABLE_382 0x3f78c +#define ixDPM_TABLE_383 0x3f790 +#define ixDPM_TABLE_384 0x3f794 +#define ixDPM_TABLE_385 0x3f798 +#define ixDPM_TABLE_386 0x3f79c +#define ixDPM_TABLE_387 0x3f7a0 +#define ixDPM_TABLE_388 0x3f7a4 +#define ixDPM_TABLE_389 0x3f7a8 +#define ixDPM_TABLE_390 0x3f7ac +#define ixDPM_TABLE_391 0x3f7b0 +#define ixDPM_TABLE_392 0x3f7b4 +#define ixDPM_TABLE_393 0x3f7b8 +#define ixDPM_TABLE_394 0x3f7bc +#define ixDPM_TABLE_395 0x3f7c0 +#define ixDPM_TABLE_396 0x3f7c4 +#define ixDPM_TABLE_397 0x3f7c8 +#define ixDPM_TABLE_398 0x3f7cc +#define ixDPM_TABLE_399 0x3f7d0 +#define ixDPM_TABLE_400 0x3f7d4 +#define ixDPM_TABLE_401 0x3f7d8 +#define 
ixDPM_TABLE_402 0x3f7dc +#define ixDPM_TABLE_403 0x3f7e0 +#define ixDPM_TABLE_404 0x3f7e4 +#define ixDPM_TABLE_405 0x3f7e8 +#define ixDPM_TABLE_406 0x3f7ec +#define ixDPM_TABLE_407 0x3f7f0 +#define ixDPM_TABLE_408 0x3f7f4 +#define ixDPM_TABLE_409 0x3f7f8 +#define ixDPM_TABLE_410 0x3f7fc +#define ixDPM_TABLE_411 0x3f800 +#define ixDPM_TABLE_412 0x3f804 +#define ixDPM_TABLE_413 0x3f808 +#define ixDPM_TABLE_414 0x3f80c +#define ixDPM_TABLE_415 0x3f810 +#define ixDPM_TABLE_416 0x3f814 +#define ixDPM_TABLE_417 0x3f818 +#define ixDPM_TABLE_418 0x3f81c +#define ixDPM_TABLE_419 0x3f820 +#define ixDPM_TABLE_420 0x3f824 +#define ixDPM_TABLE_421 0x3f828 +#define ixDPM_TABLE_422 0x3f82c +#define ixDPM_TABLE_423 0x3f830 +#define ixDPM_TABLE_424 0x3f834 +#define ixDPM_TABLE_425 0x3f838 +#define ixDPM_TABLE_426 0x3f83c +#define ixDPM_TABLE_427 0x3f840 +#define ixDPM_TABLE_428 0x3f844 +#define ixDPM_TABLE_429 0x3f848 +#define ixDPM_TABLE_430 0x3f84c +#define ixDPM_TABLE_431 0x3f850 +#define ixDPM_TABLE_432 0x3f854 +#define ixDPM_TABLE_433 0x3f858 +#define ixDPM_TABLE_434 0x3f85c +#define ixDPM_TABLE_435 0x3f860 +#define ixDPM_TABLE_436 0x3f864 +#define ixDPM_TABLE_437 0x3f868 +#define ixDPM_TABLE_438 0x3f86c +#define ixDPM_TABLE_439 0x3f870 +#define ixDPM_TABLE_440 0x3f874 +#define ixSOFT_REGISTERS_TABLE_1 0x3f89c +#define ixSOFT_REGISTERS_TABLE_2 0x3f8a0 +#define ixSOFT_REGISTERS_TABLE_3 0x3f8a4 +#define ixSOFT_REGISTERS_TABLE_4 0x3f8a8 +#define ixSOFT_REGISTERS_TABLE_5 0x3f8ac +#define ixSOFT_REGISTERS_TABLE_6 0x3f8b0 +#define ixSOFT_REGISTERS_TABLE_7 0x3f8b4 +#define ixSOFT_REGISTERS_TABLE_8 0x3f8b8 +#define ixSOFT_REGISTERS_TABLE_9 0x3f8bc +#define ixSOFT_REGISTERS_TABLE_10 0x3f8c0 +#define ixSOFT_REGISTERS_TABLE_11 0x3f8c4 +#define ixSOFT_REGISTERS_TABLE_12 0x3f8c8 +#define ixSOFT_REGISTERS_TABLE_13 0x3f8cc +#define ixSOFT_REGISTERS_TABLE_14 0x3f8d0 +#define ixSOFT_REGISTERS_TABLE_15 0x3f8d4 +#define ixSOFT_REGISTERS_TABLE_16 0x3f8d8 +#define ixSOFT_REGISTERS_TABLE_17 0x3f8dc +#define ixSOFT_REGISTERS_TABLE_18 0x3f8e0 +#define ixSOFT_REGISTERS_TABLE_19 0x3f8e4 +#define ixSOFT_REGISTERS_TABLE_20 0x3f8e8 +#define ixSOFT_REGISTERS_TABLE_21 0x3f8ec +#define ixSOFT_REGISTERS_TABLE_22 0x3f8f0 +#define ixSOFT_REGISTERS_TABLE_23 0x3f8f4 +#define ixSOFT_REGISTERS_TABLE_24 0x3f8f8 +#define ixSOFT_REGISTERS_TABLE_25 0x3f8fc +#define ixSOFT_REGISTERS_TABLE_26 0x3f900 +#define ixSOFT_REGISTERS_TABLE_27 0x3f904 +#define ixSOFT_REGISTERS_TABLE_28 0x3f888 +#define ixSOFT_REGISTERS_TABLE_29 0x3f90c +#define ixSOFT_REGISTERS_TABLE_30 0x3f910 +#define ixPM_FUSES_1 0x3f914 +#define ixPM_FUSES_2 0x3f918 +#define ixPM_FUSES_3 0x3f91c +#define ixPM_FUSES_4 0x3f920 +#define ixPM_FUSES_5 0x3f924 +#define ixPM_FUSES_6 0x3f928 +#define ixPM_FUSES_7 0x3f92c +#define ixPM_FUSES_8 0x3f930 +#define ixPM_FUSES_9 0x3f934 +#define ixPM_FUSES_10 0x3f938 +#define ixPM_FUSES_11 0x3f93c +#define ixPM_FUSES_12 0x3f940 +#define ixPM_FUSES_13 0x3f944 +#define ixPM_FUSES_14 0x3f948 +#define ixPM_FUSES_15 0x3f94c +#define ixSMU_PM_STATUS_0 0x3fe00 +#define ixSMU_PM_STATUS_1 0x3fe04 +#define ixSMU_PM_STATUS_2 0x3fe08 +#define ixSMU_PM_STATUS_3 0x3fe0c +#define ixSMU_PM_STATUS_4 0x3fe10 +#define ixSMU_PM_STATUS_5 0x3fe14 +#define ixSMU_PM_STATUS_6 0x3fe18 +#define ixSMU_PM_STATUS_7 0x3fe1c +#define ixSMU_PM_STATUS_8 0x3fe20 +#define ixSMU_PM_STATUS_9 0x3fe24 +#define ixSMU_PM_STATUS_10 0x3fe28 +#define ixSMU_PM_STATUS_11 0x3fe2c +#define ixSMU_PM_STATUS_12 0x3fe30 +#define ixSMU_PM_STATUS_13 0x3fe34 +#define ixSMU_PM_STATUS_14 0x3fe38 +#define 
ixSMU_PM_STATUS_15 0x3fe3c +#define ixSMU_PM_STATUS_16 0x3fe40 +#define ixSMU_PM_STATUS_17 0x3fe44 +#define ixSMU_PM_STATUS_18 0x3fe48 +#define ixSMU_PM_STATUS_19 0x3fe4c +#define ixSMU_PM_STATUS_20 0x3fe50 +#define ixSMU_PM_STATUS_21 0x3fe54 +#define ixSMU_PM_STATUS_22 0x3fe58 +#define ixSMU_PM_STATUS_23 0x3fe5c +#define ixSMU_PM_STATUS_24 0x3fe60 +#define ixSMU_PM_STATUS_25 0x3fe64 +#define ixSMU_PM_STATUS_26 0x3fe68 +#define ixSMU_PM_STATUS_27 0x3fe6c +#define ixSMU_PM_STATUS_28 0x3fe70 +#define ixSMU_PM_STATUS_29 0x3fe74 +#define ixSMU_PM_STATUS_30 0x3fe78 +#define ixSMU_PM_STATUS_31 0x3fe7c +#define ixSMU_PM_STATUS_32 0x3fe80 +#define ixSMU_PM_STATUS_33 0x3fe84 +#define ixSMU_PM_STATUS_34 0x3fe88 +#define ixSMU_PM_STATUS_35 0x3fe8c +#define ixSMU_PM_STATUS_36 0x3fe90 +#define ixSMU_PM_STATUS_37 0x3fe94 +#define ixSMU_PM_STATUS_38 0x3fe98 +#define ixSMU_PM_STATUS_39 0x3fe9c +#define ixSMU_PM_STATUS_40 0x3fea0 +#define ixSMU_PM_STATUS_41 0x3fea4 +#define ixSMU_PM_STATUS_42 0x3fea8 +#define ixSMU_PM_STATUS_43 0x3feac +#define ixSMU_PM_STATUS_44 0x3feb0 +#define ixSMU_PM_STATUS_45 0x3feb4 +#define ixSMU_PM_STATUS_46 0x3feb8 +#define ixSMU_PM_STATUS_47 0x3febc +#define ixSMU_PM_STATUS_48 0x3fec0 +#define ixSMU_PM_STATUS_49 0x3fec4 +#define ixSMU_PM_STATUS_50 0x3fec8 +#define ixSMU_PM_STATUS_51 0x3fecc +#define ixSMU_PM_STATUS_52 0x3fed0 +#define ixSMU_PM_STATUS_53 0x3fed4 +#define ixSMU_PM_STATUS_54 0x3fed8 +#define ixSMU_PM_STATUS_55 0x3fedc +#define ixSMU_PM_STATUS_56 0x3fee0 +#define ixSMU_PM_STATUS_57 0x3fee4 +#define ixSMU_PM_STATUS_58 0x3fee8 +#define ixSMU_PM_STATUS_59 0x3feec +#define ixSMU_PM_STATUS_60 0x3fef0 +#define ixSMU_PM_STATUS_61 0x3fef4 +#define ixSMU_PM_STATUS_62 0x3fef8 +#define ixSMU_PM_STATUS_63 0x3fefc +#define ixSMU_PM_STATUS_64 0x3ff00 +#define ixSMU_PM_STATUS_65 0x3ff04 +#define ixSMU_PM_STATUS_66 0x3ff08 +#define ixSMU_PM_STATUS_67 0x3ff0c +#define ixSMU_PM_STATUS_68 0x3ff10 +#define ixSMU_PM_STATUS_69 0x3ff14 +#define ixSMU_PM_STATUS_70 0x3ff18 +#define ixSMU_PM_STATUS_71 0x3ff1c +#define ixSMU_PM_STATUS_72 0x3ff20 +#define ixSMU_PM_STATUS_73 0x3ff24 +#define ixSMU_PM_STATUS_74 0x3ff28 +#define ixSMU_PM_STATUS_75 0x3ff2c +#define ixSMU_PM_STATUS_76 0x3ff30 +#define ixSMU_PM_STATUS_77 0x3ff34 +#define ixSMU_PM_STATUS_78 0x3ff38 +#define ixSMU_PM_STATUS_79 0x3ff3c +#define ixSMU_PM_STATUS_80 0x3ff40 +#define ixSMU_PM_STATUS_81 0x3ff44 +#define ixSMU_PM_STATUS_82 0x3ff48 +#define ixSMU_PM_STATUS_83 0x3ff4c +#define ixSMU_PM_STATUS_84 0x3ff50 +#define ixSMU_PM_STATUS_85 0x3ff54 +#define ixSMU_PM_STATUS_86 0x3ff58 +#define ixSMU_PM_STATUS_87 0x3ff5c +#define ixSMU_PM_STATUS_88 0x3ff60 +#define ixSMU_PM_STATUS_89 0x3ff64 +#define ixSMU_PM_STATUS_90 0x3ff68 +#define ixSMU_PM_STATUS_91 0x3ff6c +#define ixSMU_PM_STATUS_92 0x3ff70 +#define ixSMU_PM_STATUS_93 0x3ff74 +#define ixSMU_PM_STATUS_94 0x3ff78 +#define ixSMU_PM_STATUS_95 0x3ff7c +#define ixSMU_PM_STATUS_96 0x3ff80 +#define ixSMU_PM_STATUS_97 0x3ff84 +#define ixSMU_PM_STATUS_98 0x3ff88 +#define ixSMU_PM_STATUS_99 0x3ff8c +#define ixSMU_PM_STATUS_100 0x3ff90 +#define ixSMU_PM_STATUS_101 0x3ff94 +#define ixSMU_PM_STATUS_102 0x3ff98 +#define ixSMU_PM_STATUS_103 0x3ff9c +#define ixSMU_PM_STATUS_104 0x3ffa0 +#define ixSMU_PM_STATUS_105 0x3ffa4 +#define ixSMU_PM_STATUS_106 0x3ffa8 +#define ixSMU_PM_STATUS_107 0x3ffac +#define ixSMU_PM_STATUS_108 0x3ffb0 +#define ixSMU_PM_STATUS_109 0x3ffb4 +#define ixSMU_PM_STATUS_110 0x3ffb8 +#define ixSMU_PM_STATUS_111 0x3ffbc +#define ixSMU_PM_STATUS_112 0x3ffc0 +#define 
ixSMU_PM_STATUS_113 0x3ffc4 +#define ixSMU_PM_STATUS_114 0x3ffc8 +#define ixSMU_PM_STATUS_115 0x3ffcc +#define ixSMU_PM_STATUS_116 0x3ffd0 +#define ixSMU_PM_STATUS_117 0x3ffd4 +#define ixSMU_PM_STATUS_118 0x3ffd8 +#define ixSMU_PM_STATUS_119 0x3ffdc +#define ixSMU_PM_STATUS_120 0x3ffe0 +#define ixSMU_PM_STATUS_121 0x3ffe4 +#define ixSMU_PM_STATUS_122 0x3ffe8 +#define ixSMU_PM_STATUS_123 0x3ffec +#define ixSMU_PM_STATUS_124 0x3fff0 +#define ixSMU_PM_STATUS_125 0x3fff4 +#define ixSMU_PM_STATUS_126 0x3fff8 +#define ixSMU_PM_STATUS_127 0x3fffc +#define ixCG_THERMAL_INT_ENA 0xc2100024 +#define ixCG_THERMAL_INT_CTRL 0xc2100028 +#define ixCG_THERMAL_INT_STATUS 0xc210002c +#define ixCG_THERMAL_CTRL 0xc0300004 +#define ixCG_THERMAL_STATUS 0xc0300008 +#define ixCG_THERMAL_INT 0xc030000c +#define ixCG_MULT_THERMAL_CTRL 0xc0300010 +#define ixCG_MULT_THERMAL_STATUS 0xc0300014 +#define ixTHM_TMON2_CTRL 0xc0300034 +#define ixTHM_TMON2_CTRL2 0xc0300038 +#define ixTHM_TMON2_CSR_WR 0xc0300054 +#define ixTHM_TMON2_CSR_RD 0xc0300058 +#define ixCG_FDO_CTRL0 0xc0300064 +#define ixCG_FDO_CTRL1 0xc0300068 +#define ixCG_FDO_CTRL2 0xc030006c +#define ixCG_TACH_CTRL 0xc0300070 +#define ixCG_TACH_STATUS 0xc0300074 +#define ixCC_THM_STRAPS0 0xc0300080 +#define ixTHM_TMON0_RDIL0_DATA 0xc0300100 +#define ixTHM_TMON0_RDIL1_DATA 0xc0300104 +#define ixTHM_TMON0_RDIL2_DATA 0xc0300108 +#define ixTHM_TMON0_RDIL3_DATA 0xc030010c +#define ixTHM_TMON0_RDIL4_DATA 0xc0300110 +#define ixTHM_TMON0_RDIL5_DATA 0xc0300114 +#define ixTHM_TMON0_RDIL6_DATA 0xc0300118 +#define ixTHM_TMON0_RDIL7_DATA 0xc030011c +#define ixTHM_TMON0_RDIL8_DATA 0xc0300120 +#define ixTHM_TMON0_RDIL9_DATA 0xc0300124 +#define ixTHM_TMON0_RDIL10_DATA 0xc0300128 +#define ixTHM_TMON0_RDIL11_DATA 0xc030012c +#define ixTHM_TMON0_RDIL12_DATA 0xc0300130 +#define ixTHM_TMON0_RDIL13_DATA 0xc0300134 +#define ixTHM_TMON0_RDIL14_DATA 0xc0300138 +#define ixTHM_TMON0_RDIL15_DATA 0xc030013c +#define ixTHM_TMON0_RDIR0_DATA 0xc0300140 +#define ixTHM_TMON0_RDIR1_DATA 0xc0300144 +#define ixTHM_TMON0_RDIR2_DATA 0xc0300148 +#define ixTHM_TMON0_RDIR3_DATA 0xc030014c +#define ixTHM_TMON0_RDIR4_DATA 0xc0300150 +#define ixTHM_TMON0_RDIR5_DATA 0xc0300154 +#define ixTHM_TMON0_RDIR6_DATA 0xc0300158 +#define ixTHM_TMON0_RDIR7_DATA 0xc030015c +#define ixTHM_TMON0_RDIR8_DATA 0xc0300160 +#define ixTHM_TMON0_RDIR9_DATA 0xc0300164 +#define ixTHM_TMON0_RDIR10_DATA 0xc0300168 +#define ixTHM_TMON0_RDIR11_DATA 0xc030016c +#define ixTHM_TMON0_RDIR12_DATA 0xc0300170 +#define ixTHM_TMON0_RDIR13_DATA 0xc0300174 +#define ixTHM_TMON0_RDIR14_DATA 0xc0300178 +#define ixTHM_TMON0_RDIR15_DATA 0xc030017c +#define ixTHM_TMON1_RDIL0_DATA 0xc0300180 +#define ixTHM_TMON1_RDIL1_DATA 0xc0300184 +#define ixTHM_TMON1_RDIL2_DATA 0xc0300188 +#define ixTHM_TMON1_RDIL3_DATA 0xc030018c +#define ixTHM_TMON1_RDIL4_DATA 0xc0300190 +#define ixTHM_TMON1_RDIL5_DATA 0xc0300194 +#define ixTHM_TMON1_RDIL6_DATA 0xc0300198 +#define ixTHM_TMON1_RDIL7_DATA 0xc030019c +#define ixTHM_TMON1_RDIL8_DATA 0xc03001a0 +#define ixTHM_TMON1_RDIL9_DATA 0xc03001a4 +#define ixTHM_TMON1_RDIL10_DATA 0xc03001a8 +#define ixTHM_TMON1_RDIL11_DATA 0xc03001ac +#define ixTHM_TMON1_RDIL12_DATA 0xc03001b0 +#define ixTHM_TMON1_RDIL13_DATA 0xc03001b4 +#define ixTHM_TMON1_RDIL14_DATA 0xc03001b8 +#define ixTHM_TMON1_RDIL15_DATA 0xc03001bc +#define ixTHM_TMON1_RDIR0_DATA 0xc03001c0 +#define ixTHM_TMON1_RDIR1_DATA 0xc03001c4 +#define ixTHM_TMON1_RDIR2_DATA 0xc03001c8 +#define ixTHM_TMON1_RDIR3_DATA 0xc03001cc +#define ixTHM_TMON1_RDIR4_DATA 0xc03001d0 +#define 
ixTHM_TMON1_RDIR5_DATA 0xc03001d4 +#define ixTHM_TMON1_RDIR6_DATA 0xc03001d8 +#define ixTHM_TMON1_RDIR7_DATA 0xc03001dc +#define ixTHM_TMON1_RDIR8_DATA 0xc03001e0 +#define ixTHM_TMON1_RDIR9_DATA 0xc03001e4 +#define ixTHM_TMON1_RDIR10_DATA 0xc03001e8 +#define ixTHM_TMON1_RDIR11_DATA 0xc03001ec +#define ixTHM_TMON1_RDIR12_DATA 0xc03001f0 +#define ixTHM_TMON1_RDIR13_DATA 0xc03001f4 +#define ixTHM_TMON1_RDIR14_DATA 0xc03001f8 +#define ixTHM_TMON1_RDIR15_DATA 0xc03001fc +#define ixTHM_TMON2_RDIL0_DATA 0xc0300200 +#define ixTHM_TMON2_RDIL1_DATA 0xc0300204 +#define ixTHM_TMON2_RDIL2_DATA 0xc0300208 +#define ixTHM_TMON2_RDIL3_DATA 0xc030020c +#define ixTHM_TMON2_RDIL4_DATA 0xc0300210 +#define ixTHM_TMON2_RDIL5_DATA 0xc0300214 +#define ixTHM_TMON2_RDIL6_DATA 0xc0300218 +#define ixTHM_TMON2_RDIL7_DATA 0xc030021c +#define ixTHM_TMON2_RDIL8_DATA 0xc0300220 +#define ixTHM_TMON2_RDIL9_DATA 0xc0300224 +#define ixTHM_TMON2_RDIL10_DATA 0xc0300228 +#define ixTHM_TMON2_RDIL11_DATA 0xc030022c +#define ixTHM_TMON2_RDIL12_DATA 0xc0300230 +#define ixTHM_TMON2_RDIL13_DATA 0xc0300234 +#define ixTHM_TMON2_RDIL14_DATA 0xc0300238 +#define ixTHM_TMON2_RDIL15_DATA 0xc030023c +#define ixTHM_TMON2_RDIR0_DATA 0xc0300240 +#define ixTHM_TMON2_RDIR1_DATA 0xc0300244 +#define ixTHM_TMON2_RDIR2_DATA 0xc0300248 +#define ixTHM_TMON2_RDIR3_DATA 0xc030024c +#define ixTHM_TMON2_RDIR4_DATA 0xc0300250 +#define ixTHM_TMON2_RDIR5_DATA 0xc0300254 +#define ixTHM_TMON2_RDIR6_DATA 0xc0300258 +#define ixTHM_TMON2_RDIR7_DATA 0xc030025c +#define ixTHM_TMON2_RDIR8_DATA 0xc0300260 +#define ixTHM_TMON2_RDIR9_DATA 0xc0300264 +#define ixTHM_TMON2_RDIR10_DATA 0xc0300268 +#define ixTHM_TMON2_RDIR11_DATA 0xc030026c +#define ixTHM_TMON2_RDIR12_DATA 0xc0300270 +#define ixTHM_TMON2_RDIR13_DATA 0xc0300274 +#define ixTHM_TMON2_RDIR14_DATA 0xc0300278 +#define ixTHM_TMON2_RDIR15_DATA 0xc030027c +#define ixTHM_TMON0_INT_DATA 0xc0300300 +#define ixTHM_TMON1_INT_DATA 0xc0300304 +#define ixTHM_TMON2_INT_DATA 0xc0300308 +#define ixTHM_TMON0_DEBUG 0xc0300310 +#define ixTHM_TMON1_DEBUG 0xc0300314 +#define ixTHM_TMON2_DEBUG 0xc0300318 +#define ixTHM_TMON0_STATUS 0xc0300320 +#define ixTHM_TMON1_STATUS 0xc0300324 +#define ixTHM_TMON2_STATUS 0xc0300328 +#define ixGENERAL_PWRMGT 0xc0200000 +#define ixCNB_PWRMGT_CNTL 0xc0200004 +#define ixSCLK_PWRMGT_CNTL 0xc0200008 +#define ixTARGET_AND_CURRENT_PROFILE_INDEX 0xc0200014 +#define ixPWR_PCC_CONTROL 0xc0200018 +#define ixPWR_PCC_GPIO_SELECT 0xc020001c +#define ixCG_FREQ_TRAN_VOTING_0 0xc02001a8 +#define ixCG_FREQ_TRAN_VOTING_1 0xc02001ac +#define ixCG_FREQ_TRAN_VOTING_2 0xc02001b0 +#define ixCG_FREQ_TRAN_VOTING_3 0xc02001b4 +#define ixCG_FREQ_TRAN_VOTING_4 0xc02001b8 +#define ixCG_FREQ_TRAN_VOTING_5 0xc02001bc +#define ixCG_FREQ_TRAN_VOTING_6 0xc02001c0 +#define ixCG_FREQ_TRAN_VOTING_7 0xc02001c4 +#define ixPLL_TEST_CNTL 0xc020003c +#define ixCG_STATIC_SCREEN_PARAMETER 0xc0200044 +#define ixCG_DISPLAY_GAP_CNTL 0xc0200060 +#define ixCG_DISPLAY_GAP_CNTL2 0xc0200230 +#define ixCG_ACPI_CNTL 0xc0200064 +#define ixSCLK_DEEP_SLEEP_CNTL 0xc0200080 +#define ixSCLK_DEEP_SLEEP_CNTL2 0xc0200084 +#define ixSCLK_DEEP_SLEEP_CNTL3 0xc020009c +#define ixSCLK_DEEP_SLEEP_MISC_CNTL 0xc0200088 +#define ixLCLK_DEEP_SLEEP_CNTL 0xc020008c +#define ixLCLK_DEEP_SLEEP_CNTL2 0xc0200310 +#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1 0xc02000f0 +#define ixCG_ULV_PARAMETER 0xc020015c +#define ixSCLK_MIN_DIV 0xc02003ac +#define ixPWR_AVFS_SEL 0xc0200384 +#define ixPWR_AVFS_CNTL 0xc0200388 +#define ixPWR_AVFS0_CNTL_STATUS 0xc0200400 +#define 
ixPWR_AVFS1_CNTL_STATUS 0xc0200404 +#define ixPWR_AVFS2_CNTL_STATUS 0xc0200408 +#define ixPWR_AVFS3_CNTL_STATUS 0xc020040c +#define ixPWR_AVFS4_CNTL_STATUS 0xc0200410 +#define ixPWR_AVFS5_CNTL_STATUS 0xc0200414 +#define ixPWR_AVFS6_CNTL_STATUS 0xc0200418 +#define ixPWR_AVFS7_CNTL_STATUS 0xc020041c +#define ixPWR_AVFS8_CNTL_STATUS 0xc0200420 +#define ixPWR_AVFS9_CNTL_STATUS 0xc0200424 +#define ixPWR_AVFS10_CNTL_STATUS 0xc0200428 +#define ixPWR_AVFS11_CNTL_STATUS 0xc020042c +#define ixPWR_AVFS12_CNTL_STATUS 0xc0200430 +#define ixPWR_AVFS13_CNTL_STATUS 0xc0200434 +#define ixPWR_AVFS14_CNTL_STATUS 0xc0200438 +#define ixPWR_AVFS15_CNTL_STATUS 0xc020043c +#define ixPWR_AVFS16_CNTL_STATUS 0xc0200440 +#define ixPWR_AVFS17_CNTL_STATUS 0xc0200444 +#define ixPWR_AVFS18_CNTL_STATUS 0xc0200448 +#define ixPWR_AVFS19_CNTL_STATUS 0xc020044c +#define ixPWR_AVFS20_CNTL_STATUS 0xc0200450 +#define ixPWR_AVFS21_CNTL_STATUS 0xc0200454 +#define ixPWR_AVFS22_CNTL_STATUS 0xc0200458 +#define ixPWR_AVFS23_CNTL_STATUS 0xc020045c +#define ixPWR_AVFS24_CNTL_STATUS 0xc0200460 +#define ixPWR_AVFS25_CNTL_STATUS 0xc0200464 +#define ixPWR_AVFS26_CNTL_STATUS 0xc0200468 +#define ixPWR_AVFS27_CNTL_STATUS 0xc020046c +#define ixPWR_CKS_ENABLE 0xc020034c +#define ixPWR_CKS_CNTL 0xc0200350 +#define ixPWR_DISP_TIMER_CONTROL 0xc02003c0 +#define ixPWR_DISP_TIMER_DEBUG 0xc02003c4 +#define ixPWR_DISP_TIMER2_CONTROL 0xc02003c8 +#define ixPWR_DISP_TIMER2_DEBUG 0xc02003cc +#define ixPWR_DISP_TIMER_CONTROL2 0xc0200378 +#define ixVDDGFX_IDLE_PARAMETER 0xc020036c +#define ixVDDGFX_IDLE_CONTROL 0xc0200370 +#define ixVDDGFX_IDLE_EXIT 0xc0200374 +#define ixLCAC_MC0_CNTL 0xc0400130 +#define ixLCAC_MC0_OVR_SEL 0xc0400134 +#define ixLCAC_MC0_OVR_VAL 0xc0400138 +#define ixLCAC_MC1_CNTL 0xc040013c +#define ixLCAC_MC1_OVR_SEL 0xc0400140 +#define ixLCAC_MC1_OVR_VAL 0xc0400144 +#define ixLCAC_MC2_CNTL 0xc0400148 +#define ixLCAC_MC2_OVR_SEL 0xc040014c +#define ixLCAC_MC2_OVR_VAL 0xc0400150 +#define ixLCAC_MC3_CNTL 0xc0400154 +#define ixLCAC_MC3_OVR_SEL 0xc0400158 +#define ixLCAC_MC3_OVR_VAL 0xc040015c +#define ixLCAC_MC4_CNTL 0xc0400d60 +#define ixLCAC_MC4_OVR_SEL 0xc0400d64 +#define ixLCAC_MC4_OVR_VAL 0xc0400d68 +#define ixLCAC_MC5_CNTL 0xc0400d6c +#define ixLCAC_MC5_OVR_SEL 0xc0400d70 +#define ixLCAC_MC5_OVR_VAL 0xc0400d74 +#define ixLCAC_MC6_CNTL 0xc0400d78 +#define ixLCAC_MC6_OVR_SEL 0xc0400d7c +#define ixLCAC_MC6_OVR_VAL 0xc0400d80 +#define ixLCAC_MC7_CNTL 0xc0400d84 +#define ixLCAC_MC7_OVR_SEL 0xc0400d88 +#define ixLCAC_MC7_OVR_VAL 0xc0400d8c +#define ixLCAC_CPL_CNTL 0xc0400160 +#define ixLCAC_CPL_OVR_SEL 0xc0400164 +#define ixLCAC_CPL_OVR_VAL 0xc0400168 +#define mmROM_SMC_IND_INDEX 0x80 +#define mmROM0_ROM_SMC_IND_INDEX 0x80 +#define mmROM1_ROM_SMC_IND_INDEX 0x82 +#define mmROM2_ROM_SMC_IND_INDEX 0x84 +#define mmROM3_ROM_SMC_IND_INDEX 0x86 +#define mmROM_SMC_IND_DATA 0x81 +#define mmROM0_ROM_SMC_IND_DATA 0x81 +#define mmROM1_ROM_SMC_IND_DATA 0x83 +#define mmROM2_ROM_SMC_IND_DATA 0x85 +#define mmROM3_ROM_SMC_IND_DATA 0x87 +#define ixROM_CNTL 0xc0600000 +#define ixPAGE_MIRROR_CNTL 0xc0600004 +#define ixROM_STATUS 0xc0600008 +#define ixCGTT_ROM_CLK_CTRL0 0xc060000c +#define ixROM_INDEX 0xc0600010 +#define ixROM_DATA 0xc0600014 +#define ixROM_START 0xc0600018 +#define ixROM_SW_CNTL 0xc060001c +#define ixROM_SW_STATUS 0xc0600020 +#define ixROM_SW_COMMAND 0xc0600024 +#define ixROM_SW_DATA_1 0xc0600028 +#define ixROM_SW_DATA_2 0xc060002c +#define ixROM_SW_DATA_3 0xc0600030 +#define ixROM_SW_DATA_4 0xc0600034 +#define ixROM_SW_DATA_5 0xc0600038 +#define 
ixROM_SW_DATA_6 0xc060003c +#define ixROM_SW_DATA_7 0xc0600040 +#define ixROM_SW_DATA_8 0xc0600044 +#define ixROM_SW_DATA_9 0xc0600048 +#define ixROM_SW_DATA_10 0xc060004c +#define ixROM_SW_DATA_11 0xc0600050 +#define ixROM_SW_DATA_12 0xc0600054 +#define ixROM_SW_DATA_13 0xc0600058 +#define ixROM_SW_DATA_14 0xc060005c +#define ixROM_SW_DATA_15 0xc0600060 +#define ixROM_SW_DATA_16 0xc0600064 +#define ixROM_SW_DATA_17 0xc0600068 +#define ixROM_SW_DATA_18 0xc060006c +#define ixROM_SW_DATA_19 0xc0600070 +#define ixROM_SW_DATA_20 0xc0600074 +#define ixROM_SW_DATA_21 0xc0600078 +#define ixROM_SW_DATA_22 0xc060007c +#define ixROM_SW_DATA_23 0xc0600080 +#define ixROM_SW_DATA_24 0xc0600084 +#define ixROM_SW_DATA_25 0xc0600088 +#define ixROM_SW_DATA_26 0xc060008c +#define ixROM_SW_DATA_27 0xc0600090 +#define ixROM_SW_DATA_28 0xc0600094 +#define ixROM_SW_DATA_29 0xc0600098 +#define ixROM_SW_DATA_30 0xc060009c +#define ixROM_SW_DATA_31 0xc06000a0 +#define ixROM_SW_DATA_32 0xc06000a4 +#define ixROM_SW_DATA_33 0xc06000a8 +#define ixROM_SW_DATA_34 0xc06000ac +#define ixROM_SW_DATA_35 0xc06000b0 +#define ixROM_SW_DATA_36 0xc06000b4 +#define ixROM_SW_DATA_37 0xc06000b8 +#define ixROM_SW_DATA_38 0xc06000bc +#define ixROM_SW_DATA_39 0xc06000c0 +#define ixROM_SW_DATA_40 0xc06000c4 +#define ixROM_SW_DATA_41 0xc06000c8 +#define ixROM_SW_DATA_42 0xc06000cc +#define ixROM_SW_DATA_43 0xc06000d0 +#define ixROM_SW_DATA_44 0xc06000d4 +#define ixROM_SW_DATA_45 0xc06000d8 +#define ixROM_SW_DATA_46 0xc06000dc +#define ixROM_SW_DATA_47 0xc06000e0 +#define ixROM_SW_DATA_48 0xc06000e4 +#define ixROM_SW_DATA_49 0xc06000e8 +#define ixROM_SW_DATA_50 0xc06000ec +#define ixROM_SW_DATA_51 0xc06000f0 +#define ixROM_SW_DATA_52 0xc06000f4 +#define ixROM_SW_DATA_53 0xc06000f8 +#define ixROM_SW_DATA_54 0xc06000fc +#define ixROM_SW_DATA_55 0xc0600100 +#define ixROM_SW_DATA_56 0xc0600104 +#define ixROM_SW_DATA_57 0xc0600108 +#define ixROM_SW_DATA_58 0xc060010c +#define ixROM_SW_DATA_59 0xc0600110 +#define ixROM_SW_DATA_60 0xc0600114 +#define ixROM_SW_DATA_61 0xc0600118 +#define ixROM_SW_DATA_62 0xc060011c +#define ixROM_SW_DATA_63 0xc0600120 +#define ixROM_SW_DATA_64 0xc0600124 +#define mmGC_CAC_CGTT_CLK_CTRL 0x3292 +#define mmSE_CAC_CGTT_CLK_CTRL 0x3293 +#define mmGC_CAC_LKG_AGGR_LOWER 0x3296 +#define mmGC_CAC_LKG_AGGR_UPPER 0x3297 +#define ixGC_CAC_WEIGHT_CU_0 0x32 +#define ixGC_CAC_WEIGHT_CU_1 0x33 +#define ixGC_CAC_WEIGHT_CU_2 0x34 +#define ixGC_CAC_WEIGHT_CU_3 0x35 +#define ixGC_CAC_WEIGHT_CU_4 0x36 +#define ixGC_CAC_WEIGHT_CU_5 0x37 +#define ixGC_CAC_WEIGHT_CU_6 0x38 +#define ixGC_CAC_WEIGHT_CU_7 0x39 +#define ixGC_CAC_ACC_CU0 0xba +#define ixGC_CAC_ACC_CU1 0xbb +#define ixGC_CAC_ACC_CU2 0xbc +#define ixGC_CAC_ACC_CU3 0xbd +#define ixGC_CAC_ACC_CU4 0xbe +#define ixGC_CAC_ACC_CU5 0xbf +#define ixGC_CAC_ACC_CU6 0xc0 +#define ixGC_CAC_ACC_CU7 0xc1 +#define ixGC_CAC_ACC_CU8 0xc2 +#define ixGC_CAC_ACC_CU9 0xc3 +#define ixGC_CAC_ACC_CU10 0xc4 +#define ixGC_CAC_ACC_CU11 0xc5 +#define ixGC_CAC_ACC_CU12 0xc6 +#define ixGC_CAC_ACC_CU13 0xc7 +#define ixGC_CAC_ACC_CU14 0xc8 +#define ixGC_CAC_ACC_CU15 0xc9 +#define ixGC_CAC_OVRD_CU 0xe7 + +#endif /* SMU_7_1_3_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h new file mode 100644 index 000000000..f19c4208d --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h @@ -0,0 +1,1282 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef SMU_7_1_3_ENUM_H +#define SMU_7_1_3_ENUM_H + +#define CG_SRBM_START_ADDR 0x600 +#define CG_SRBM_END_ADDR 0x8ff +#define RCU_CCF_DWORDS0 0xa0 +#define RCU_CCF_BITS0 0x1400 +#define RCU_SAM_BYTES 0x2c +#define RCU_SAM_RTL_BYTES 0x2c +#define RCU_SMU_BYTES 0x14 +#define RCU_SMU_RTL_BYTES 0x14 +#define SFP_CHAIN_ADDR 0x1 +#define SFP_SADR 0x0 +#define SFP_EADR 0x37f +#define SAMU_KEY_CHAIN_ADR 0x0 +#define SAMU_KEY_SADR 0x280 +#define SAMU_KEY_EADR 0x2ab +#define SMU_KEY_CHAIN_ADR 0x0 +#define SMU_KEY_SADR 0x2ac +#define SMU_KEY_EADR 0x2bf +#define SMC_MSG_TEST 0x1 +#define SMC_MSG_PHY_LN_OFF 0x2 +#define SMC_MSG_PHY_LN_ON 0x3 +#define SMC_MSG_DDI_PHY_OFF 0x4 +#define SMC_MSG_DDI_PHY_ON 0x5 +#define SMC_MSG_CASCADE_PLL_OFF 0x6 +#define SMC_MSG_CASCADE_PLL_ON 0x7 +#define SMC_MSG_PWR_OFF_x16 0x8 +#define SMC_MSG_CONFIG_LCLK_DPM 0x9 +#define SMC_MSG_FLUSH_DATA_CACHE 0xa +#define SMC_MSG_FLUSH_INSTRUCTION_CACHE 0xb +#define SMC_MSG_CONFIG_VPC_ACCUMULATOR 0xc +#define SMC_MSG_CONFIG_BAPM 0xd +#define SMC_MSG_CONFIG_TDC_LIMIT 0xe +#define SMC_MSG_CONFIG_LPMx 0xf +#define SMC_MSG_CONFIG_HTC_LIMIT 0x10 +#define SMC_MSG_CONFIG_THERMAL_CNTL 0x11 +#define SMC_MSG_CONFIG_VOLTAGE_CNTL 0x12 +#define SMC_MSG_CONFIG_TDP_CNTL 0x13 +#define SMC_MSG_EN_PM_CNTL 0x14 +#define SMC_MSG_DIS_PM_CNTL 0x15 +#define SMC_MSG_CONFIG_NBDPM 0x16 +#define SMC_MSG_CONFIG_LOADLINE 0x17 +#define SMC_MSG_ADJUST_LOADLINE 0x18 +#define SMC_MSG_RESET 0x20 +#define SMC_MSG_VOLTAGE 0x25 +#define SMC_VERSION_MAJOR 0x7 +#define SMC_VERSION_MINOR 0x0 +#define SMC_HEADER_SIZE 0x40 +#define ROM_SIGNATURE 0xaa55 +typedef enum SurfaceEndian { + ENDIAN_NONE = 0x0, + ENDIAN_8IN16 = 0x1, + ENDIAN_8IN32 = 0x2, + ENDIAN_8IN64 = 0x3, +} SurfaceEndian; +typedef enum ArrayMode { + ARRAY_LINEAR_GENERAL = 0x0, + ARRAY_LINEAR_ALIGNED = 0x1, + ARRAY_1D_TILED_THIN1 = 0x2, + ARRAY_1D_TILED_THICK = 0x3, + ARRAY_2D_TILED_THIN1 = 0x4, + ARRAY_PRT_TILED_THIN1 = 0x5, + ARRAY_PRT_2D_TILED_THIN1 = 0x6, + ARRAY_2D_TILED_THICK = 0x7, + ARRAY_2D_TILED_XTHICK = 0x8, + ARRAY_PRT_TILED_THICK = 0x9, + ARRAY_PRT_2D_TILED_THICK = 0xa, + ARRAY_PRT_3D_TILED_THIN1 = 0xb, + ARRAY_3D_TILED_THIN1 = 0xc, + ARRAY_3D_TILED_THICK = 0xd, + ARRAY_3D_TILED_XTHICK = 0xe, + ARRAY_PRT_3D_TILED_THICK = 0xf, +} ArrayMode; +typedef enum PipeTiling { + CONFIG_1_PIPE = 0x0, + CONFIG_2_PIPE = 0x1, + CONFIG_4_PIPE = 0x2, + CONFIG_8_PIPE = 0x3, +} PipeTiling; +typedef enum BankTiling { + CONFIG_4_BANK = 0x0, + 
CONFIG_8_BANK = 0x1, +} BankTiling; +typedef enum GroupInterleave { + CONFIG_256B_GROUP = 0x0, + CONFIG_512B_GROUP = 0x1, +} GroupInterleave; +typedef enum RowTiling { + CONFIG_1KB_ROW = 0x0, + CONFIG_2KB_ROW = 0x1, + CONFIG_4KB_ROW = 0x2, + CONFIG_8KB_ROW = 0x3, + CONFIG_1KB_ROW_OPT = 0x4, + CONFIG_2KB_ROW_OPT = 0x5, + CONFIG_4KB_ROW_OPT = 0x6, + CONFIG_8KB_ROW_OPT = 0x7, +} RowTiling; +typedef enum BankSwapBytes { + CONFIG_128B_SWAPS = 0x0, + CONFIG_256B_SWAPS = 0x1, + CONFIG_512B_SWAPS = 0x2, + CONFIG_1KB_SWAPS = 0x3, +} BankSwapBytes; +typedef enum SampleSplitBytes { + CONFIG_1KB_SPLIT = 0x0, + CONFIG_2KB_SPLIT = 0x1, + CONFIG_4KB_SPLIT = 0x2, + CONFIG_8KB_SPLIT = 0x3, +} SampleSplitBytes; +typedef enum NumPipes { + ADDR_CONFIG_1_PIPE = 0x0, + ADDR_CONFIG_2_PIPE = 0x1, + ADDR_CONFIG_4_PIPE = 0x2, + ADDR_CONFIG_8_PIPE = 0x3, +} NumPipes; +typedef enum PipeInterleaveSize { + ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, + ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, +} PipeInterleaveSize; +typedef enum BankInterleaveSize { + ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, + ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, + ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, + ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, +} BankInterleaveSize; +typedef enum NumShaderEngines { + ADDR_CONFIG_1_SHADER_ENGINE = 0x0, + ADDR_CONFIG_2_SHADER_ENGINE = 0x1, +} NumShaderEngines; +typedef enum ShaderEngineTileSize { + ADDR_CONFIG_SE_TILE_16 = 0x0, + ADDR_CONFIG_SE_TILE_32 = 0x1, +} ShaderEngineTileSize; +typedef enum NumGPUs { + ADDR_CONFIG_1_GPU = 0x0, + ADDR_CONFIG_2_GPU = 0x1, + ADDR_CONFIG_4_GPU = 0x2, +} NumGPUs; +typedef enum MultiGPUTileSize { + ADDR_CONFIG_GPU_TILE_16 = 0x0, + ADDR_CONFIG_GPU_TILE_32 = 0x1, + ADDR_CONFIG_GPU_TILE_64 = 0x2, + ADDR_CONFIG_GPU_TILE_128 = 0x3, +} MultiGPUTileSize; +typedef enum RowSize { + ADDR_CONFIG_1KB_ROW = 0x0, + ADDR_CONFIG_2KB_ROW = 0x1, + ADDR_CONFIG_4KB_ROW = 0x2, +} RowSize; +typedef enum NumLowerPipes { + ADDR_CONFIG_1_LOWER_PIPES = 0x0, + ADDR_CONFIG_2_LOWER_PIPES = 0x1, +} NumLowerPipes; +typedef enum DebugBlockId { + DBG_CLIENT_BLKID_RESERVED = 0x0, + DBG_CLIENT_BLKID_dbg = 0x1, + DBG_CLIENT_BLKID_scf2 = 0x2, + DBG_CLIENT_BLKID_mcd5_0 = 0x3, + DBG_CLIENT_BLKID_mcd5_1 = 0x4, + DBG_CLIENT_BLKID_mcd6_0 = 0x5, + DBG_CLIENT_BLKID_mcd6_1 = 0x6, + DBG_CLIENT_BLKID_mcd7_0 = 0x7, + DBG_CLIENT_BLKID_mcd7_1 = 0x8, + DBG_CLIENT_BLKID_vmc = 0x9, + DBG_CLIENT_BLKID_sx30 = 0xa, + DBG_CLIENT_BLKID_mcd2_0 = 0xb, + DBG_CLIENT_BLKID_mcd2_1 = 0xc, + DBG_CLIENT_BLKID_bci1 = 0xd, + DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0xe, + DBG_CLIENT_BLKID_mcc0 = 0xf, + DBG_CLIENT_BLKID_uvdf_0 = 0x10, + DBG_CLIENT_BLKID_uvdf_1 = 0x11, + DBG_CLIENT_BLKID_uvdf_2 = 0x12, + DBG_CLIENT_BLKID_bci0 = 0x13, + DBG_CLIENT_BLKID_vcec0_0 = 0x14, + DBG_CLIENT_BLKID_cb100 = 0x15, + DBG_CLIENT_BLKID_cb001 = 0x16, + DBG_CLIENT_BLKID_cb002 = 0x17, + DBG_CLIENT_BLKID_cb003 = 0x18, + DBG_CLIENT_BLKID_mcd4_0 = 0x19, + DBG_CLIENT_BLKID_mcd4_1 = 0x1a, + DBG_CLIENT_BLKID_tmonw00 = 0x1b, + DBG_CLIENT_BLKID_cb101 = 0x1c, + DBG_CLIENT_BLKID_cb102 = 0x1d, + DBG_CLIENT_BLKID_cb103 = 0x1e, + DBG_CLIENT_BLKID_sx10 = 0x1f, + DBG_CLIENT_BLKID_cb301 = 0x20, + DBG_CLIENT_BLKID_cb302 = 0x21, + DBG_CLIENT_BLKID_cb303 = 0x22, + DBG_CLIENT_BLKID_tmonw01 = 0x23, + DBG_CLIENT_BLKID_tmonw02 = 0x24, + DBG_CLIENT_BLKID_vcea0_0 = 0x25, + DBG_CLIENT_BLKID_vcea0_1 = 0x26, + DBG_CLIENT_BLKID_vcea0_2 = 0x27, + DBG_CLIENT_BLKID_vcea0_3 = 0x28, + DBG_CLIENT_BLKID_scf1 = 0x29, + DBG_CLIENT_BLKID_sx20 = 0x2a, + DBG_CLIENT_BLKID_spim1 = 0x2b, + DBG_CLIENT_BLKID_scb1 = 0x2c, + 
DBG_CLIENT_BLKID_pa10 = 0x2d, + DBG_CLIENT_BLKID_pa00 = 0x2e, + DBG_CLIENT_BLKID_gmcon = 0x2f, + DBG_CLIENT_BLKID_mcb = 0x30, + DBG_CLIENT_BLKID_vgt0 = 0x31, + DBG_CLIENT_BLKID_pc0 = 0x32, + DBG_CLIENT_BLKID_bci2 = 0x33, + DBG_CLIENT_BLKID_uvdb_0 = 0x34, + DBG_CLIENT_BLKID_spim3 = 0x35, + DBG_CLIENT_BLKID_scb3 = 0x36, + DBG_CLIENT_BLKID_cpc_0 = 0x37, + DBG_CLIENT_BLKID_cpc_1 = 0x38, + DBG_CLIENT_BLKID_uvdm_0 = 0x39, + DBG_CLIENT_BLKID_uvdm_1 = 0x3a, + DBG_CLIENT_BLKID_uvdm_2 = 0x3b, + DBG_CLIENT_BLKID_uvdm_3 = 0x3c, + DBG_CLIENT_BLKID_cb000 = 0x3d, + DBG_CLIENT_BLKID_spim0 = 0x3e, + DBG_CLIENT_BLKID_scb0 = 0x3f, + DBG_CLIENT_BLKID_mcc2 = 0x40, + DBG_CLIENT_BLKID_ds0 = 0x41, + DBG_CLIENT_BLKID_srbm = 0x42, + DBG_CLIENT_BLKID_ih = 0x43, + DBG_CLIENT_BLKID_sem = 0x44, + DBG_CLIENT_BLKID_sdma_0 = 0x45, + DBG_CLIENT_BLKID_sdma_1 = 0x46, + DBG_CLIENT_BLKID_hdp = 0x47, + DBG_CLIENT_BLKID_acp_0 = 0x48, + DBG_CLIENT_BLKID_acp_1 = 0x49, + DBG_CLIENT_BLKID_cb200 = 0x4a, + DBG_CLIENT_BLKID_scf3 = 0x4b, + DBG_CLIENT_BLKID_bci3 = 0x4c, + DBG_CLIENT_BLKID_mcd0_0 = 0x4d, + DBG_CLIENT_BLKID_mcd0_1 = 0x4e, + DBG_CLIENT_BLKID_pa11 = 0x4f, + DBG_CLIENT_BLKID_pa01 = 0x50, + DBG_CLIENT_BLKID_cb201 = 0x51, + DBG_CLIENT_BLKID_cb202 = 0x52, + DBG_CLIENT_BLKID_cb203 = 0x53, + DBG_CLIENT_BLKID_spim2 = 0x54, + DBG_CLIENT_BLKID_scb2 = 0x55, + DBG_CLIENT_BLKID_vgt2 = 0x56, + DBG_CLIENT_BLKID_pc2 = 0x57, + DBG_CLIENT_BLKID_smu_0 = 0x58, + DBG_CLIENT_BLKID_smu_1 = 0x59, + DBG_CLIENT_BLKID_smu_2 = 0x5a, + DBG_CLIENT_BLKID_cb1 = 0x5b, + DBG_CLIENT_BLKID_ia0 = 0x5c, + DBG_CLIENT_BLKID_wd = 0x5d, + DBG_CLIENT_BLKID_ia1 = 0x5e, + DBG_CLIENT_BLKID_scf0 = 0x5f, + DBG_CLIENT_BLKID_vgt1 = 0x60, + DBG_CLIENT_BLKID_pc1 = 0x61, + DBG_CLIENT_BLKID_cb0 = 0x62, + DBG_CLIENT_BLKID_gdc_one_0 = 0x63, + DBG_CLIENT_BLKID_gdc_one_1 = 0x64, + DBG_CLIENT_BLKID_gdc_one_2 = 0x65, + DBG_CLIENT_BLKID_gdc_one_3 = 0x66, + DBG_CLIENT_BLKID_gdc_one_4 = 0x67, + DBG_CLIENT_BLKID_gdc_one_5 = 0x68, + DBG_CLIENT_BLKID_gdc_one_6 = 0x69, + DBG_CLIENT_BLKID_gdc_one_7 = 0x6a, + DBG_CLIENT_BLKID_gdc_one_8 = 0x6b, + DBG_CLIENT_BLKID_gdc_one_9 = 0x6c, + DBG_CLIENT_BLKID_gdc_one_10 = 0x6d, + DBG_CLIENT_BLKID_gdc_one_11 = 0x6e, + DBG_CLIENT_BLKID_gdc_one_12 = 0x6f, + DBG_CLIENT_BLKID_gdc_one_13 = 0x70, + DBG_CLIENT_BLKID_gdc_one_14 = 0x71, + DBG_CLIENT_BLKID_gdc_one_15 = 0x72, + DBG_CLIENT_BLKID_gdc_one_16 = 0x73, + DBG_CLIENT_BLKID_gdc_one_17 = 0x74, + DBG_CLIENT_BLKID_gdc_one_18 = 0x75, + DBG_CLIENT_BLKID_gdc_one_19 = 0x76, + DBG_CLIENT_BLKID_gdc_one_20 = 0x77, + DBG_CLIENT_BLKID_gdc_one_21 = 0x78, + DBG_CLIENT_BLKID_gdc_one_22 = 0x79, + DBG_CLIENT_BLKID_gdc_one_23 = 0x7a, + DBG_CLIENT_BLKID_gdc_one_24 = 0x7b, + DBG_CLIENT_BLKID_gdc_one_25 = 0x7c, + DBG_CLIENT_BLKID_gdc_one_26 = 0x7d, + DBG_CLIENT_BLKID_gdc_one_27 = 0x7e, + DBG_CLIENT_BLKID_gdc_one_28 = 0x7f, + DBG_CLIENT_BLKID_gdc_one_29 = 0x80, + DBG_CLIENT_BLKID_gdc_one_30 = 0x81, + DBG_CLIENT_BLKID_gdc_one_31 = 0x82, + DBG_CLIENT_BLKID_gdc_one_32 = 0x83, + DBG_CLIENT_BLKID_gdc_one_33 = 0x84, + DBG_CLIENT_BLKID_gdc_one_34 = 0x85, + DBG_CLIENT_BLKID_gdc_one_35 = 0x86, + DBG_CLIENT_BLKID_vceb0_0 = 0x87, + DBG_CLIENT_BLKID_vgt3 = 0x88, + DBG_CLIENT_BLKID_pc3 = 0x89, + DBG_CLIENT_BLKID_mcd3_0 = 0x8a, + DBG_CLIENT_BLKID_mcd3_1 = 0x8b, + DBG_CLIENT_BLKID_uvdu_0 = 0x8c, + DBG_CLIENT_BLKID_uvdu_1 = 0x8d, + DBG_CLIENT_BLKID_uvdu_2 = 0x8e, + DBG_CLIENT_BLKID_uvdu_3 = 0x8f, + DBG_CLIENT_BLKID_uvdu_4 = 0x90, + DBG_CLIENT_BLKID_uvdu_5 = 0x91, + DBG_CLIENT_BLKID_uvdu_6 = 0x92, + DBG_CLIENT_BLKID_cb300 = 0x93, + 
DBG_CLIENT_BLKID_mcd1_0 = 0x94, + DBG_CLIENT_BLKID_mcd1_1 = 0x95, + DBG_CLIENT_BLKID_sx00 = 0x96, + DBG_CLIENT_BLKID_uvdc_0 = 0x97, + DBG_CLIENT_BLKID_uvdc_1 = 0x98, + DBG_CLIENT_BLKID_mcc3 = 0x99, + DBG_CLIENT_BLKID_mcc4 = 0x9a, + DBG_CLIENT_BLKID_mcc5 = 0x9b, + DBG_CLIENT_BLKID_mcc6 = 0x9c, + DBG_CLIENT_BLKID_mcc7 = 0x9d, + DBG_CLIENT_BLKID_cpg_0 = 0x9e, + DBG_CLIENT_BLKID_cpg_1 = 0x9f, + DBG_CLIENT_BLKID_gck = 0xa0, + DBG_CLIENT_BLKID_mcc1 = 0xa1, + DBG_CLIENT_BLKID_cpf_0 = 0xa2, + DBG_CLIENT_BLKID_cpf_1 = 0xa3, + DBG_CLIENT_BLKID_rlc = 0xa4, + DBG_CLIENT_BLKID_grbm = 0xa5, + DBG_CLIENT_BLKID_sammsp = 0xa6, + DBG_CLIENT_BLKID_dci_pg = 0xa7, + DBG_CLIENT_BLKID_dci_0 = 0xa8, + DBG_CLIENT_BLKID_dccg0_0 = 0xa9, + DBG_CLIENT_BLKID_dccg0_1 = 0xaa, + DBG_CLIENT_BLKID_dcfe01_0 = 0xab, + DBG_CLIENT_BLKID_dcfe02_0 = 0xac, + DBG_CLIENT_BLKID_dcfe03_0 = 0xad, + DBG_CLIENT_BLKID_dcfe04_0 = 0xae, + DBG_CLIENT_BLKID_dcfe05_0 = 0xaf, + DBG_CLIENT_BLKID_dcfe06_0 = 0xb0, + DBG_CLIENT_BLKID_mcq0_0 = 0xb1, + DBG_CLIENT_BLKID_mcq0_1 = 0xb2, + DBG_CLIENT_BLKID_mcq1_0 = 0xb3, + DBG_CLIENT_BLKID_mcq1_1 = 0xb4, + DBG_CLIENT_BLKID_mcq2_0 = 0xb5, + DBG_CLIENT_BLKID_mcq2_1 = 0xb6, + DBG_CLIENT_BLKID_mcq3_0 = 0xb7, + DBG_CLIENT_BLKID_mcq3_1 = 0xb8, + DBG_CLIENT_BLKID_mcq4_0 = 0xb9, + DBG_CLIENT_BLKID_mcq4_1 = 0xba, + DBG_CLIENT_BLKID_mcq5_0 = 0xbb, + DBG_CLIENT_BLKID_mcq5_1 = 0xbc, + DBG_CLIENT_BLKID_mcq6_0 = 0xbd, + DBG_CLIENT_BLKID_mcq6_1 = 0xbe, + DBG_CLIENT_BLKID_mcq7_0 = 0xbf, + DBG_CLIENT_BLKID_mcq7_1 = 0xc0, + DBG_CLIENT_BLKID_uvdi_0 = 0xc1, + DBG_CLIENT_BLKID_RESERVED_LAST = 0xc2, +} DebugBlockId; +typedef enum DebugBlockId_OLD { + DBG_BLOCK_ID_RESERVED = 0x0, + DBG_BLOCK_ID_DBG = 0x1, + DBG_BLOCK_ID_VMC = 0x2, + DBG_BLOCK_ID_PDMA = 0x3, + DBG_BLOCK_ID_CG = 0x4, + DBG_BLOCK_ID_SRBM = 0x5, + DBG_BLOCK_ID_GRBM = 0x6, + DBG_BLOCK_ID_RLC = 0x7, + DBG_BLOCK_ID_CSC = 0x8, + DBG_BLOCK_ID_SEM = 0x9, + DBG_BLOCK_ID_IH = 0xa, + DBG_BLOCK_ID_SC = 0xb, + DBG_BLOCK_ID_SQ = 0xc, + DBG_BLOCK_ID_AVP = 0xd, + DBG_BLOCK_ID_GMCON = 0xe, + DBG_BLOCK_ID_SMU = 0xf, + DBG_BLOCK_ID_DMA0 = 0x10, + DBG_BLOCK_ID_DMA1 = 0x11, + DBG_BLOCK_ID_SPIM = 0x12, + DBG_BLOCK_ID_GDS = 0x13, + DBG_BLOCK_ID_SPIS = 0x14, + DBG_BLOCK_ID_UNUSED0 = 0x15, + DBG_BLOCK_ID_PA0 = 0x16, + DBG_BLOCK_ID_PA1 = 0x17, + DBG_BLOCK_ID_CP0 = 0x18, + DBG_BLOCK_ID_CP1 = 0x19, + DBG_BLOCK_ID_CP2 = 0x1a, + DBG_BLOCK_ID_UNUSED1 = 0x1b, + DBG_BLOCK_ID_UVDU = 0x1c, + DBG_BLOCK_ID_UVDM = 0x1d, + DBG_BLOCK_ID_VCE = 0x1e, + DBG_BLOCK_ID_UNUSED2 = 0x1f, + DBG_BLOCK_ID_VGT0 = 0x20, + DBG_BLOCK_ID_VGT1 = 0x21, + DBG_BLOCK_ID_IA = 0x22, + DBG_BLOCK_ID_UNUSED3 = 0x23, + DBG_BLOCK_ID_SCT0 = 0x24, + DBG_BLOCK_ID_SCT1 = 0x25, + DBG_BLOCK_ID_SPM0 = 0x26, + DBG_BLOCK_ID_SPM1 = 0x27, + DBG_BLOCK_ID_TCAA = 0x28, + DBG_BLOCK_ID_TCAB = 0x29, + DBG_BLOCK_ID_TCCA = 0x2a, + DBG_BLOCK_ID_TCCB = 0x2b, + DBG_BLOCK_ID_MCC0 = 0x2c, + DBG_BLOCK_ID_MCC1 = 0x2d, + DBG_BLOCK_ID_MCC2 = 0x2e, + DBG_BLOCK_ID_MCC3 = 0x2f, + DBG_BLOCK_ID_SX0 = 0x30, + DBG_BLOCK_ID_SX1 = 0x31, + DBG_BLOCK_ID_SX2 = 0x32, + DBG_BLOCK_ID_SX3 = 0x33, + DBG_BLOCK_ID_UNUSED4 = 0x34, + DBG_BLOCK_ID_UNUSED5 = 0x35, + DBG_BLOCK_ID_UNUSED6 = 0x36, + DBG_BLOCK_ID_UNUSED7 = 0x37, + DBG_BLOCK_ID_PC0 = 0x38, + DBG_BLOCK_ID_PC1 = 0x39, + DBG_BLOCK_ID_UNUSED8 = 0x3a, + DBG_BLOCK_ID_UNUSED9 = 0x3b, + DBG_BLOCK_ID_UNUSED10 = 0x3c, + DBG_BLOCK_ID_UNUSED11 = 0x3d, + DBG_BLOCK_ID_MCB = 0x3e, + DBG_BLOCK_ID_UNUSED12 = 0x3f, + DBG_BLOCK_ID_SCB0 = 0x40, + DBG_BLOCK_ID_SCB1 = 0x41, + DBG_BLOCK_ID_UNUSED13 = 0x42, + DBG_BLOCK_ID_UNUSED14 = 0x43, + 
DBG_BLOCK_ID_SCF0 = 0x44, + DBG_BLOCK_ID_SCF1 = 0x45, + DBG_BLOCK_ID_UNUSED15 = 0x46, + DBG_BLOCK_ID_UNUSED16 = 0x47, + DBG_BLOCK_ID_BCI0 = 0x48, + DBG_BLOCK_ID_BCI1 = 0x49, + DBG_BLOCK_ID_BCI2 = 0x4a, + DBG_BLOCK_ID_BCI3 = 0x4b, + DBG_BLOCK_ID_UNUSED17 = 0x4c, + DBG_BLOCK_ID_UNUSED18 = 0x4d, + DBG_BLOCK_ID_UNUSED19 = 0x4e, + DBG_BLOCK_ID_UNUSED20 = 0x4f, + DBG_BLOCK_ID_CB00 = 0x50, + DBG_BLOCK_ID_CB01 = 0x51, + DBG_BLOCK_ID_CB02 = 0x52, + DBG_BLOCK_ID_CB03 = 0x53, + DBG_BLOCK_ID_CB04 = 0x54, + DBG_BLOCK_ID_UNUSED21 = 0x55, + DBG_BLOCK_ID_UNUSED22 = 0x56, + DBG_BLOCK_ID_UNUSED23 = 0x57, + DBG_BLOCK_ID_CB10 = 0x58, + DBG_BLOCK_ID_CB11 = 0x59, + DBG_BLOCK_ID_CB12 = 0x5a, + DBG_BLOCK_ID_CB13 = 0x5b, + DBG_BLOCK_ID_CB14 = 0x5c, + DBG_BLOCK_ID_UNUSED24 = 0x5d, + DBG_BLOCK_ID_UNUSED25 = 0x5e, + DBG_BLOCK_ID_UNUSED26 = 0x5f, + DBG_BLOCK_ID_TCP0 = 0x60, + DBG_BLOCK_ID_TCP1 = 0x61, + DBG_BLOCK_ID_TCP2 = 0x62, + DBG_BLOCK_ID_TCP3 = 0x63, + DBG_BLOCK_ID_TCP4 = 0x64, + DBG_BLOCK_ID_TCP5 = 0x65, + DBG_BLOCK_ID_TCP6 = 0x66, + DBG_BLOCK_ID_TCP7 = 0x67, + DBG_BLOCK_ID_TCP8 = 0x68, + DBG_BLOCK_ID_TCP9 = 0x69, + DBG_BLOCK_ID_TCP10 = 0x6a, + DBG_BLOCK_ID_TCP11 = 0x6b, + DBG_BLOCK_ID_TCP12 = 0x6c, + DBG_BLOCK_ID_TCP13 = 0x6d, + DBG_BLOCK_ID_TCP14 = 0x6e, + DBG_BLOCK_ID_TCP15 = 0x6f, + DBG_BLOCK_ID_TCP16 = 0x70, + DBG_BLOCK_ID_TCP17 = 0x71, + DBG_BLOCK_ID_TCP18 = 0x72, + DBG_BLOCK_ID_TCP19 = 0x73, + DBG_BLOCK_ID_TCP20 = 0x74, + DBG_BLOCK_ID_TCP21 = 0x75, + DBG_BLOCK_ID_TCP22 = 0x76, + DBG_BLOCK_ID_TCP23 = 0x77, + DBG_BLOCK_ID_TCP_RESERVED0 = 0x78, + DBG_BLOCK_ID_TCP_RESERVED1 = 0x79, + DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a, + DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b, + DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c, + DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d, + DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e, + DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f, + DBG_BLOCK_ID_DB00 = 0x80, + DBG_BLOCK_ID_DB01 = 0x81, + DBG_BLOCK_ID_DB02 = 0x82, + DBG_BLOCK_ID_DB03 = 0x83, + DBG_BLOCK_ID_DB04 = 0x84, + DBG_BLOCK_ID_UNUSED27 = 0x85, + DBG_BLOCK_ID_UNUSED28 = 0x86, + DBG_BLOCK_ID_UNUSED29 = 0x87, + DBG_BLOCK_ID_DB10 = 0x88, + DBG_BLOCK_ID_DB11 = 0x89, + DBG_BLOCK_ID_DB12 = 0x8a, + DBG_BLOCK_ID_DB13 = 0x8b, + DBG_BLOCK_ID_DB14 = 0x8c, + DBG_BLOCK_ID_UNUSED30 = 0x8d, + DBG_BLOCK_ID_UNUSED31 = 0x8e, + DBG_BLOCK_ID_UNUSED32 = 0x8f, + DBG_BLOCK_ID_TCC0 = 0x90, + DBG_BLOCK_ID_TCC1 = 0x91, + DBG_BLOCK_ID_TCC2 = 0x92, + DBG_BLOCK_ID_TCC3 = 0x93, + DBG_BLOCK_ID_TCC4 = 0x94, + DBG_BLOCK_ID_TCC5 = 0x95, + DBG_BLOCK_ID_TCC6 = 0x96, + DBG_BLOCK_ID_TCC7 = 0x97, + DBG_BLOCK_ID_SPS00 = 0x98, + DBG_BLOCK_ID_SPS01 = 0x99, + DBG_BLOCK_ID_SPS02 = 0x9a, + DBG_BLOCK_ID_SPS10 = 0x9b, + DBG_BLOCK_ID_SPS11 = 0x9c, + DBG_BLOCK_ID_SPS12 = 0x9d, + DBG_BLOCK_ID_UNUSED33 = 0x9e, + DBG_BLOCK_ID_UNUSED34 = 0x9f, + DBG_BLOCK_ID_TA00 = 0xa0, + DBG_BLOCK_ID_TA01 = 0xa1, + DBG_BLOCK_ID_TA02 = 0xa2, + DBG_BLOCK_ID_TA03 = 0xa3, + DBG_BLOCK_ID_TA04 = 0xa4, + DBG_BLOCK_ID_TA05 = 0xa5, + DBG_BLOCK_ID_TA06 = 0xa6, + DBG_BLOCK_ID_TA07 = 0xa7, + DBG_BLOCK_ID_TA08 = 0xa8, + DBG_BLOCK_ID_TA09 = 0xa9, + DBG_BLOCK_ID_TA0A = 0xaa, + DBG_BLOCK_ID_TA0B = 0xab, + DBG_BLOCK_ID_UNUSED35 = 0xac, + DBG_BLOCK_ID_UNUSED36 = 0xad, + DBG_BLOCK_ID_UNUSED37 = 0xae, + DBG_BLOCK_ID_UNUSED38 = 0xaf, + DBG_BLOCK_ID_TA10 = 0xb0, + DBG_BLOCK_ID_TA11 = 0xb1, + DBG_BLOCK_ID_TA12 = 0xb2, + DBG_BLOCK_ID_TA13 = 0xb3, + DBG_BLOCK_ID_TA14 = 0xb4, + DBG_BLOCK_ID_TA15 = 0xb5, + DBG_BLOCK_ID_TA16 = 0xb6, + DBG_BLOCK_ID_TA17 = 0xb7, + DBG_BLOCK_ID_TA18 = 0xb8, + DBG_BLOCK_ID_TA19 = 0xb9, + DBG_BLOCK_ID_TA1A = 0xba, + DBG_BLOCK_ID_TA1B = 0xbb, + 
DBG_BLOCK_ID_UNUSED39 = 0xbc, + DBG_BLOCK_ID_UNUSED40 = 0xbd, + DBG_BLOCK_ID_UNUSED41 = 0xbe, + DBG_BLOCK_ID_UNUSED42 = 0xbf, + DBG_BLOCK_ID_TD00 = 0xc0, + DBG_BLOCK_ID_TD01 = 0xc1, + DBG_BLOCK_ID_TD02 = 0xc2, + DBG_BLOCK_ID_TD03 = 0xc3, + DBG_BLOCK_ID_TD04 = 0xc4, + DBG_BLOCK_ID_TD05 = 0xc5, + DBG_BLOCK_ID_TD06 = 0xc6, + DBG_BLOCK_ID_TD07 = 0xc7, + DBG_BLOCK_ID_TD08 = 0xc8, + DBG_BLOCK_ID_TD09 = 0xc9, + DBG_BLOCK_ID_TD0A = 0xca, + DBG_BLOCK_ID_TD0B = 0xcb, + DBG_BLOCK_ID_UNUSED43 = 0xcc, + DBG_BLOCK_ID_UNUSED44 = 0xcd, + DBG_BLOCK_ID_UNUSED45 = 0xce, + DBG_BLOCK_ID_UNUSED46 = 0xcf, + DBG_BLOCK_ID_TD10 = 0xd0, + DBG_BLOCK_ID_TD11 = 0xd1, + DBG_BLOCK_ID_TD12 = 0xd2, + DBG_BLOCK_ID_TD13 = 0xd3, + DBG_BLOCK_ID_TD14 = 0xd4, + DBG_BLOCK_ID_TD15 = 0xd5, + DBG_BLOCK_ID_TD16 = 0xd6, + DBG_BLOCK_ID_TD17 = 0xd7, + DBG_BLOCK_ID_TD18 = 0xd8, + DBG_BLOCK_ID_TD19 = 0xd9, + DBG_BLOCK_ID_TD1A = 0xda, + DBG_BLOCK_ID_TD1B = 0xdb, + DBG_BLOCK_ID_UNUSED47 = 0xdc, + DBG_BLOCK_ID_UNUSED48 = 0xdd, + DBG_BLOCK_ID_UNUSED49 = 0xde, + DBG_BLOCK_ID_UNUSED50 = 0xdf, + DBG_BLOCK_ID_MCD0 = 0xe0, + DBG_BLOCK_ID_MCD1 = 0xe1, + DBG_BLOCK_ID_MCD2 = 0xe2, + DBG_BLOCK_ID_MCD3 = 0xe3, + DBG_BLOCK_ID_MCD4 = 0xe4, + DBG_BLOCK_ID_MCD5 = 0xe5, + DBG_BLOCK_ID_UNUSED51 = 0xe6, + DBG_BLOCK_ID_UNUSED52 = 0xe7, +} DebugBlockId_OLD; +typedef enum DebugBlockId_BY2 { + DBG_BLOCK_ID_RESERVED_BY2 = 0x0, + DBG_BLOCK_ID_VMC_BY2 = 0x1, + DBG_BLOCK_ID_CG_BY2 = 0x2, + DBG_BLOCK_ID_GRBM_BY2 = 0x3, + DBG_BLOCK_ID_CSC_BY2 = 0x4, + DBG_BLOCK_ID_IH_BY2 = 0x5, + DBG_BLOCK_ID_SQ_BY2 = 0x6, + DBG_BLOCK_ID_GMCON_BY2 = 0x7, + DBG_BLOCK_ID_DMA0_BY2 = 0x8, + DBG_BLOCK_ID_SPIM_BY2 = 0x9, + DBG_BLOCK_ID_SPIS_BY2 = 0xa, + DBG_BLOCK_ID_PA0_BY2 = 0xb, + DBG_BLOCK_ID_CP0_BY2 = 0xc, + DBG_BLOCK_ID_CP2_BY2 = 0xd, + DBG_BLOCK_ID_UVDU_BY2 = 0xe, + DBG_BLOCK_ID_VCE_BY2 = 0xf, + DBG_BLOCK_ID_VGT0_BY2 = 0x10, + DBG_BLOCK_ID_IA_BY2 = 0x11, + DBG_BLOCK_ID_SCT0_BY2 = 0x12, + DBG_BLOCK_ID_SPM0_BY2 = 0x13, + DBG_BLOCK_ID_TCAA_BY2 = 0x14, + DBG_BLOCK_ID_TCCA_BY2 = 0x15, + DBG_BLOCK_ID_MCC0_BY2 = 0x16, + DBG_BLOCK_ID_MCC2_BY2 = 0x17, + DBG_BLOCK_ID_SX0_BY2 = 0x18, + DBG_BLOCK_ID_SX2_BY2 = 0x19, + DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a, + DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b, + DBG_BLOCK_ID_PC0_BY2 = 0x1c, + DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d, + DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e, + DBG_BLOCK_ID_MCB_BY2 = 0x1f, + DBG_BLOCK_ID_SCB0_BY2 = 0x20, + DBG_BLOCK_ID_UNUSED13_BY2 = 0x21, + DBG_BLOCK_ID_SCF0_BY2 = 0x22, + DBG_BLOCK_ID_UNUSED15_BY2 = 0x23, + DBG_BLOCK_ID_BCI0_BY2 = 0x24, + DBG_BLOCK_ID_BCI2_BY2 = 0x25, + DBG_BLOCK_ID_UNUSED17_BY2 = 0x26, + DBG_BLOCK_ID_UNUSED19_BY2 = 0x27, + DBG_BLOCK_ID_CB00_BY2 = 0x28, + DBG_BLOCK_ID_CB02_BY2 = 0x29, + DBG_BLOCK_ID_CB04_BY2 = 0x2a, + DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b, + DBG_BLOCK_ID_CB10_BY2 = 0x2c, + DBG_BLOCK_ID_CB12_BY2 = 0x2d, + DBG_BLOCK_ID_CB14_BY2 = 0x2e, + DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f, + DBG_BLOCK_ID_TCP0_BY2 = 0x30, + DBG_BLOCK_ID_TCP2_BY2 = 0x31, + DBG_BLOCK_ID_TCP4_BY2 = 0x32, + DBG_BLOCK_ID_TCP6_BY2 = 0x33, + DBG_BLOCK_ID_TCP8_BY2 = 0x34, + DBG_BLOCK_ID_TCP10_BY2 = 0x35, + DBG_BLOCK_ID_TCP12_BY2 = 0x36, + DBG_BLOCK_ID_TCP14_BY2 = 0x37, + DBG_BLOCK_ID_TCP16_BY2 = 0x38, + DBG_BLOCK_ID_TCP18_BY2 = 0x39, + DBG_BLOCK_ID_TCP20_BY2 = 0x3a, + DBG_BLOCK_ID_TCP22_BY2 = 0x3b, + DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, + DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, + DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, + DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, + DBG_BLOCK_ID_DB00_BY2 = 0x40, + DBG_BLOCK_ID_DB02_BY2 = 0x41, + DBG_BLOCK_ID_DB04_BY2 = 0x42, + 
DBG_BLOCK_ID_UNUSED28_BY2 = 0x43, + DBG_BLOCK_ID_DB10_BY2 = 0x44, + DBG_BLOCK_ID_DB12_BY2 = 0x45, + DBG_BLOCK_ID_DB14_BY2 = 0x46, + DBG_BLOCK_ID_UNUSED31_BY2 = 0x47, + DBG_BLOCK_ID_TCC0_BY2 = 0x48, + DBG_BLOCK_ID_TCC2_BY2 = 0x49, + DBG_BLOCK_ID_TCC4_BY2 = 0x4a, + DBG_BLOCK_ID_TCC6_BY2 = 0x4b, + DBG_BLOCK_ID_SPS00_BY2 = 0x4c, + DBG_BLOCK_ID_SPS02_BY2 = 0x4d, + DBG_BLOCK_ID_SPS11_BY2 = 0x4e, + DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f, + DBG_BLOCK_ID_TA00_BY2 = 0x50, + DBG_BLOCK_ID_TA02_BY2 = 0x51, + DBG_BLOCK_ID_TA04_BY2 = 0x52, + DBG_BLOCK_ID_TA06_BY2 = 0x53, + DBG_BLOCK_ID_TA08_BY2 = 0x54, + DBG_BLOCK_ID_TA0A_BY2 = 0x55, + DBG_BLOCK_ID_UNUSED35_BY2 = 0x56, + DBG_BLOCK_ID_UNUSED37_BY2 = 0x57, + DBG_BLOCK_ID_TA10_BY2 = 0x58, + DBG_BLOCK_ID_TA12_BY2 = 0x59, + DBG_BLOCK_ID_TA14_BY2 = 0x5a, + DBG_BLOCK_ID_TA16_BY2 = 0x5b, + DBG_BLOCK_ID_TA18_BY2 = 0x5c, + DBG_BLOCK_ID_TA1A_BY2 = 0x5d, + DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e, + DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f, + DBG_BLOCK_ID_TD00_BY2 = 0x60, + DBG_BLOCK_ID_TD02_BY2 = 0x61, + DBG_BLOCK_ID_TD04_BY2 = 0x62, + DBG_BLOCK_ID_TD06_BY2 = 0x63, + DBG_BLOCK_ID_TD08_BY2 = 0x64, + DBG_BLOCK_ID_TD0A_BY2 = 0x65, + DBG_BLOCK_ID_UNUSED43_BY2 = 0x66, + DBG_BLOCK_ID_UNUSED45_BY2 = 0x67, + DBG_BLOCK_ID_TD10_BY2 = 0x68, + DBG_BLOCK_ID_TD12_BY2 = 0x69, + DBG_BLOCK_ID_TD14_BY2 = 0x6a, + DBG_BLOCK_ID_TD16_BY2 = 0x6b, + DBG_BLOCK_ID_TD18_BY2 = 0x6c, + DBG_BLOCK_ID_TD1A_BY2 = 0x6d, + DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e, + DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f, + DBG_BLOCK_ID_MCD0_BY2 = 0x70, + DBG_BLOCK_ID_MCD2_BY2 = 0x71, + DBG_BLOCK_ID_MCD4_BY2 = 0x72, + DBG_BLOCK_ID_UNUSED51_BY2 = 0x73, +} DebugBlockId_BY2; +typedef enum DebugBlockId_BY4 { + DBG_BLOCK_ID_RESERVED_BY4 = 0x0, + DBG_BLOCK_ID_CG_BY4 = 0x1, + DBG_BLOCK_ID_CSC_BY4 = 0x2, + DBG_BLOCK_ID_SQ_BY4 = 0x3, + DBG_BLOCK_ID_DMA0_BY4 = 0x4, + DBG_BLOCK_ID_SPIS_BY4 = 0x5, + DBG_BLOCK_ID_CP0_BY4 = 0x6, + DBG_BLOCK_ID_UVDU_BY4 = 0x7, + DBG_BLOCK_ID_VGT0_BY4 = 0x8, + DBG_BLOCK_ID_SCT0_BY4 = 0x9, + DBG_BLOCK_ID_TCAA_BY4 = 0xa, + DBG_BLOCK_ID_MCC0_BY4 = 0xb, + DBG_BLOCK_ID_SX0_BY4 = 0xc, + DBG_BLOCK_ID_UNUSED4_BY4 = 0xd, + DBG_BLOCK_ID_PC0_BY4 = 0xe, + DBG_BLOCK_ID_UNUSED10_BY4 = 0xf, + DBG_BLOCK_ID_SCB0_BY4 = 0x10, + DBG_BLOCK_ID_SCF0_BY4 = 0x11, + DBG_BLOCK_ID_BCI0_BY4 = 0x12, + DBG_BLOCK_ID_UNUSED17_BY4 = 0x13, + DBG_BLOCK_ID_CB00_BY4 = 0x14, + DBG_BLOCK_ID_CB04_BY4 = 0x15, + DBG_BLOCK_ID_CB10_BY4 = 0x16, + DBG_BLOCK_ID_CB14_BY4 = 0x17, + DBG_BLOCK_ID_TCP0_BY4 = 0x18, + DBG_BLOCK_ID_TCP4_BY4 = 0x19, + DBG_BLOCK_ID_TCP8_BY4 = 0x1a, + DBG_BLOCK_ID_TCP12_BY4 = 0x1b, + DBG_BLOCK_ID_TCP16_BY4 = 0x1c, + DBG_BLOCK_ID_TCP20_BY4 = 0x1d, + DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, + DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, + DBG_BLOCK_ID_DB_BY4 = 0x20, + DBG_BLOCK_ID_DB04_BY4 = 0x21, + DBG_BLOCK_ID_DB10_BY4 = 0x22, + DBG_BLOCK_ID_DB14_BY4 = 0x23, + DBG_BLOCK_ID_TCC0_BY4 = 0x24, + DBG_BLOCK_ID_TCC4_BY4 = 0x25, + DBG_BLOCK_ID_SPS00_BY4 = 0x26, + DBG_BLOCK_ID_SPS11_BY4 = 0x27, + DBG_BLOCK_ID_TA00_BY4 = 0x28, + DBG_BLOCK_ID_TA04_BY4 = 0x29, + DBG_BLOCK_ID_TA08_BY4 = 0x2a, + DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b, + DBG_BLOCK_ID_TA10_BY4 = 0x2c, + DBG_BLOCK_ID_TA14_BY4 = 0x2d, + DBG_BLOCK_ID_TA18_BY4 = 0x2e, + DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f, + DBG_BLOCK_ID_TD00_BY4 = 0x30, + DBG_BLOCK_ID_TD04_BY4 = 0x31, + DBG_BLOCK_ID_TD08_BY4 = 0x32, + DBG_BLOCK_ID_UNUSED43_BY4 = 0x33, + DBG_BLOCK_ID_TD10_BY4 = 0x34, + DBG_BLOCK_ID_TD14_BY4 = 0x35, + DBG_BLOCK_ID_TD18_BY4 = 0x36, + DBG_BLOCK_ID_UNUSED47_BY4 = 0x37, + DBG_BLOCK_ID_MCD0_BY4 = 0x38, + DBG_BLOCK_ID_MCD4_BY4 = 0x39, 
+} DebugBlockId_BY4; +typedef enum DebugBlockId_BY8 { + DBG_BLOCK_ID_RESERVED_BY8 = 0x0, + DBG_BLOCK_ID_CSC_BY8 = 0x1, + DBG_BLOCK_ID_DMA0_BY8 = 0x2, + DBG_BLOCK_ID_CP0_BY8 = 0x3, + DBG_BLOCK_ID_VGT0_BY8 = 0x4, + DBG_BLOCK_ID_TCAA_BY8 = 0x5, + DBG_BLOCK_ID_SX0_BY8 = 0x6, + DBG_BLOCK_ID_PC0_BY8 = 0x7, + DBG_BLOCK_ID_SCB0_BY8 = 0x8, + DBG_BLOCK_ID_BCI0_BY8 = 0x9, + DBG_BLOCK_ID_CB00_BY8 = 0xa, + DBG_BLOCK_ID_CB10_BY8 = 0xb, + DBG_BLOCK_ID_TCP0_BY8 = 0xc, + DBG_BLOCK_ID_TCP8_BY8 = 0xd, + DBG_BLOCK_ID_TCP16_BY8 = 0xe, + DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, + DBG_BLOCK_ID_DB00_BY8 = 0x10, + DBG_BLOCK_ID_DB10_BY8 = 0x11, + DBG_BLOCK_ID_TCC0_BY8 = 0x12, + DBG_BLOCK_ID_SPS00_BY8 = 0x13, + DBG_BLOCK_ID_TA00_BY8 = 0x14, + DBG_BLOCK_ID_TA08_BY8 = 0x15, + DBG_BLOCK_ID_TA10_BY8 = 0x16, + DBG_BLOCK_ID_TA18_BY8 = 0x17, + DBG_BLOCK_ID_TD00_BY8 = 0x18, + DBG_BLOCK_ID_TD08_BY8 = 0x19, + DBG_BLOCK_ID_TD10_BY8 = 0x1a, + DBG_BLOCK_ID_TD18_BY8 = 0x1b, + DBG_BLOCK_ID_MCD0_BY8 = 0x1c, +} DebugBlockId_BY8; +typedef enum DebugBlockId_BY16 { + DBG_BLOCK_ID_RESERVED_BY16 = 0x0, + DBG_BLOCK_ID_DMA0_BY16 = 0x1, + DBG_BLOCK_ID_VGT0_BY16 = 0x2, + DBG_BLOCK_ID_SX0_BY16 = 0x3, + DBG_BLOCK_ID_SCB0_BY16 = 0x4, + DBG_BLOCK_ID_CB00_BY16 = 0x5, + DBG_BLOCK_ID_TCP0_BY16 = 0x6, + DBG_BLOCK_ID_TCP16_BY16 = 0x7, + DBG_BLOCK_ID_DB00_BY16 = 0x8, + DBG_BLOCK_ID_TCC0_BY16 = 0x9, + DBG_BLOCK_ID_TA00_BY16 = 0xa, + DBG_BLOCK_ID_TA10_BY16 = 0xb, + DBG_BLOCK_ID_TD00_BY16 = 0xc, + DBG_BLOCK_ID_TD10_BY16 = 0xd, + DBG_BLOCK_ID_MCD0_BY16 = 0xe, +} DebugBlockId_BY16; +typedef enum ColorTransform { + DCC_CT_AUTO = 0x0, + DCC_CT_NONE = 0x1, + ABGR_TO_A_BG_G_RB = 0x2, + BGRA_TO_BG_G_RB_A = 0x3, +} ColorTransform; +typedef enum CompareRef { + REF_NEVER = 0x0, + REF_LESS = 0x1, + REF_EQUAL = 0x2, + REF_LEQUAL = 0x3, + REF_GREATER = 0x4, + REF_NOTEQUAL = 0x5, + REF_GEQUAL = 0x6, + REF_ALWAYS = 0x7, +} CompareRef; +typedef enum ReadSize { + READ_256_BITS = 0x0, + READ_512_BITS = 0x1, +} ReadSize; +typedef enum DepthFormat { + DEPTH_INVALID = 0x0, + DEPTH_16 = 0x1, + DEPTH_X8_24 = 0x2, + DEPTH_8_24 = 0x3, + DEPTH_X8_24_FLOAT = 0x4, + DEPTH_8_24_FLOAT = 0x5, + DEPTH_32_FLOAT = 0x6, + DEPTH_X24_8_32_FLOAT = 0x7, +} DepthFormat; +typedef enum ZFormat { + Z_INVALID = 0x0, + Z_16 = 0x1, + Z_24 = 0x2, + Z_32_FLOAT = 0x3, +} ZFormat; +typedef enum StencilFormat { + STENCIL_INVALID = 0x0, + STENCIL_8 = 0x1, +} StencilFormat; +typedef enum CmaskMode { + CMASK_CLEAR_NONE = 0x0, + CMASK_CLEAR_ONE = 0x1, + CMASK_CLEAR_ALL = 0x2, + CMASK_ANY_EXPANDED = 0x3, + CMASK_ALPHA0_FRAG1 = 0x4, + CMASK_ALPHA0_FRAG2 = 0x5, + CMASK_ALPHA0_FRAG4 = 0x6, + CMASK_ALPHA0_FRAGS = 0x7, + CMASK_ALPHA1_FRAG1 = 0x8, + CMASK_ALPHA1_FRAG2 = 0x9, + CMASK_ALPHA1_FRAG4 = 0xa, + CMASK_ALPHA1_FRAGS = 0xb, + CMASK_ALPHAX_FRAG1 = 0xc, + CMASK_ALPHAX_FRAG2 = 0xd, + CMASK_ALPHAX_FRAG4 = 0xe, + CMASK_ALPHAX_FRAGS = 0xf, +} CmaskMode; +typedef enum QuadExportFormat { + EXPORT_UNUSED = 0x0, + EXPORT_32_R = 0x1, + EXPORT_32_GR = 0x2, + EXPORT_32_AR = 0x3, + EXPORT_FP16_ABGR = 0x4, + EXPORT_UNSIGNED16_ABGR = 0x5, + EXPORT_SIGNED16_ABGR = 0x6, + EXPORT_32_ABGR = 0x7, +} QuadExportFormat; +typedef enum QuadExportFormatOld { + EXPORT_4P_32BPC_ABGR = 0x0, + EXPORT_4P_16BPC_ABGR = 0x1, + EXPORT_4P_32BPC_GR = 0x2, + EXPORT_4P_32BPC_AR = 0x3, + EXPORT_2P_32BPC_ABGR = 0x4, + EXPORT_8P_32BPC_R = 0x5, +} QuadExportFormatOld; +typedef enum ColorFormat { + COLOR_INVALID = 0x0, + COLOR_8 = 0x1, + COLOR_16 = 0x2, + COLOR_8_8 = 0x3, + COLOR_32 = 0x4, + COLOR_16_16 = 0x5, + COLOR_10_11_11 = 0x6, + COLOR_11_11_10 = 
0x7, + COLOR_10_10_10_2 = 0x8, + COLOR_2_10_10_10 = 0x9, + COLOR_8_8_8_8 = 0xa, + COLOR_32_32 = 0xb, + COLOR_16_16_16_16 = 0xc, + COLOR_RESERVED_13 = 0xd, + COLOR_32_32_32_32 = 0xe, + COLOR_RESERVED_15 = 0xf, + COLOR_5_6_5 = 0x10, + COLOR_1_5_5_5 = 0x11, + COLOR_5_5_5_1 = 0x12, + COLOR_4_4_4_4 = 0x13, + COLOR_8_24 = 0x14, + COLOR_24_8 = 0x15, + COLOR_X24_8_32_FLOAT = 0x16, + COLOR_RESERVED_23 = 0x17, +} ColorFormat; +typedef enum SurfaceFormat { + FMT_INVALID = 0x0, + FMT_8 = 0x1, + FMT_16 = 0x2, + FMT_8_8 = 0x3, + FMT_32 = 0x4, + FMT_16_16 = 0x5, + FMT_10_11_11 = 0x6, + FMT_11_11_10 = 0x7, + FMT_10_10_10_2 = 0x8, + FMT_2_10_10_10 = 0x9, + FMT_8_8_8_8 = 0xa, + FMT_32_32 = 0xb, + FMT_16_16_16_16 = 0xc, + FMT_32_32_32 = 0xd, + FMT_32_32_32_32 = 0xe, + FMT_RESERVED_4 = 0xf, + FMT_5_6_5 = 0x10, + FMT_1_5_5_5 = 0x11, + FMT_5_5_5_1 = 0x12, + FMT_4_4_4_4 = 0x13, + FMT_8_24 = 0x14, + FMT_24_8 = 0x15, + FMT_X24_8_32_FLOAT = 0x16, + FMT_RESERVED_33 = 0x17, + FMT_11_11_10_FLOAT = 0x18, + FMT_16_FLOAT = 0x19, + FMT_32_FLOAT = 0x1a, + FMT_16_16_FLOAT = 0x1b, + FMT_8_24_FLOAT = 0x1c, + FMT_24_8_FLOAT = 0x1d, + FMT_32_32_FLOAT = 0x1e, + FMT_10_11_11_FLOAT = 0x1f, + FMT_16_16_16_16_FLOAT = 0x20, + FMT_3_3_2 = 0x21, + FMT_6_5_5 = 0x22, + FMT_32_32_32_32_FLOAT = 0x23, + FMT_RESERVED_36 = 0x24, + FMT_1 = 0x25, + FMT_1_REVERSED = 0x26, + FMT_GB_GR = 0x27, + FMT_BG_RG = 0x28, + FMT_32_AS_8 = 0x29, + FMT_32_AS_8_8 = 0x2a, + FMT_5_9_9_9_SHAREDEXP = 0x2b, + FMT_8_8_8 = 0x2c, + FMT_16_16_16 = 0x2d, + FMT_16_16_16_FLOAT = 0x2e, + FMT_4_4 = 0x2f, + FMT_32_32_32_FLOAT = 0x30, + FMT_BC1 = 0x31, + FMT_BC2 = 0x32, + FMT_BC3 = 0x33, + FMT_BC4 = 0x34, + FMT_BC5 = 0x35, + FMT_BC6 = 0x36, + FMT_BC7 = 0x37, + FMT_32_AS_32_32_32_32 = 0x38, + FMT_APC3 = 0x39, + FMT_APC4 = 0x3a, + FMT_APC5 = 0x3b, + FMT_APC6 = 0x3c, + FMT_APC7 = 0x3d, + FMT_CTX1 = 0x3e, + FMT_RESERVED_63 = 0x3f, +} SurfaceFormat; +typedef enum BUF_DATA_FORMAT { + BUF_DATA_FORMAT_INVALID = 0x0, + BUF_DATA_FORMAT_8 = 0x1, + BUF_DATA_FORMAT_16 = 0x2, + BUF_DATA_FORMAT_8_8 = 0x3, + BUF_DATA_FORMAT_32 = 0x4, + BUF_DATA_FORMAT_16_16 = 0x5, + BUF_DATA_FORMAT_10_11_11 = 0x6, + BUF_DATA_FORMAT_11_11_10 = 0x7, + BUF_DATA_FORMAT_10_10_10_2 = 0x8, + BUF_DATA_FORMAT_2_10_10_10 = 0x9, + BUF_DATA_FORMAT_8_8_8_8 = 0xa, + BUF_DATA_FORMAT_32_32 = 0xb, + BUF_DATA_FORMAT_16_16_16_16 = 0xc, + BUF_DATA_FORMAT_32_32_32 = 0xd, + BUF_DATA_FORMAT_32_32_32_32 = 0xe, + BUF_DATA_FORMAT_RESERVED_15 = 0xf, +} BUF_DATA_FORMAT; +typedef enum IMG_DATA_FORMAT { + IMG_DATA_FORMAT_INVALID = 0x0, + IMG_DATA_FORMAT_8 = 0x1, + IMG_DATA_FORMAT_16 = 0x2, + IMG_DATA_FORMAT_8_8 = 0x3, + IMG_DATA_FORMAT_32 = 0x4, + IMG_DATA_FORMAT_16_16 = 0x5, + IMG_DATA_FORMAT_10_11_11 = 0x6, + IMG_DATA_FORMAT_11_11_10 = 0x7, + IMG_DATA_FORMAT_10_10_10_2 = 0x8, + IMG_DATA_FORMAT_2_10_10_10 = 0x9, + IMG_DATA_FORMAT_8_8_8_8 = 0xa, + IMG_DATA_FORMAT_32_32 = 0xb, + IMG_DATA_FORMAT_16_16_16_16 = 0xc, + IMG_DATA_FORMAT_32_32_32 = 0xd, + IMG_DATA_FORMAT_32_32_32_32 = 0xe, + IMG_DATA_FORMAT_RESERVED_15 = 0xf, + IMG_DATA_FORMAT_5_6_5 = 0x10, + IMG_DATA_FORMAT_1_5_5_5 = 0x11, + IMG_DATA_FORMAT_5_5_5_1 = 0x12, + IMG_DATA_FORMAT_4_4_4_4 = 0x13, + IMG_DATA_FORMAT_8_24 = 0x14, + IMG_DATA_FORMAT_24_8 = 0x15, + IMG_DATA_FORMAT_X24_8_32 = 0x16, + IMG_DATA_FORMAT_RESERVED_23 = 0x17, + IMG_DATA_FORMAT_RESERVED_24 = 0x18, + IMG_DATA_FORMAT_RESERVED_25 = 0x19, + IMG_DATA_FORMAT_RESERVED_26 = 0x1a, + IMG_DATA_FORMAT_RESERVED_27 = 0x1b, + IMG_DATA_FORMAT_RESERVED_28 = 0x1c, + IMG_DATA_FORMAT_RESERVED_29 = 0x1d, + IMG_DATA_FORMAT_RESERVED_30 = 
0x1e, + IMG_DATA_FORMAT_RESERVED_31 = 0x1f, + IMG_DATA_FORMAT_GB_GR = 0x20, + IMG_DATA_FORMAT_BG_RG = 0x21, + IMG_DATA_FORMAT_5_9_9_9 = 0x22, + IMG_DATA_FORMAT_BC1 = 0x23, + IMG_DATA_FORMAT_BC2 = 0x24, + IMG_DATA_FORMAT_BC3 = 0x25, + IMG_DATA_FORMAT_BC4 = 0x26, + IMG_DATA_FORMAT_BC5 = 0x27, + IMG_DATA_FORMAT_BC6 = 0x28, + IMG_DATA_FORMAT_BC7 = 0x29, + IMG_DATA_FORMAT_RESERVED_42 = 0x2a, + IMG_DATA_FORMAT_RESERVED_43 = 0x2b, + IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, + IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, + IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, + IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, + IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, + IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, + IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, + IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, + IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, + IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, + IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, + IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, + IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, + IMG_DATA_FORMAT_4_4 = 0x39, + IMG_DATA_FORMAT_6_5_5 = 0x3a, + IMG_DATA_FORMAT_1 = 0x3b, + IMG_DATA_FORMAT_1_REVERSED = 0x3c, + IMG_DATA_FORMAT_32_AS_8 = 0x3d, + IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, + IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, +} IMG_DATA_FORMAT; +typedef enum BUF_NUM_FORMAT { + BUF_NUM_FORMAT_UNORM = 0x0, + BUF_NUM_FORMAT_SNORM = 0x1, + BUF_NUM_FORMAT_USCALED = 0x2, + BUF_NUM_FORMAT_SSCALED = 0x3, + BUF_NUM_FORMAT_UINT = 0x4, + BUF_NUM_FORMAT_SINT = 0x5, + BUF_NUM_FORMAT_RESERVED_6 = 0x6, + BUF_NUM_FORMAT_FLOAT = 0x7, +} BUF_NUM_FORMAT; +typedef enum IMG_NUM_FORMAT { + IMG_NUM_FORMAT_UNORM = 0x0, + IMG_NUM_FORMAT_SNORM = 0x1, + IMG_NUM_FORMAT_USCALED = 0x2, + IMG_NUM_FORMAT_SSCALED = 0x3, + IMG_NUM_FORMAT_UINT = 0x4, + IMG_NUM_FORMAT_SINT = 0x5, + IMG_NUM_FORMAT_RESERVED_6 = 0x6, + IMG_NUM_FORMAT_FLOAT = 0x7, + IMG_NUM_FORMAT_RESERVED_8 = 0x8, + IMG_NUM_FORMAT_SRGB = 0x9, + IMG_NUM_FORMAT_RESERVED_10 = 0xa, + IMG_NUM_FORMAT_RESERVED_11 = 0xb, + IMG_NUM_FORMAT_RESERVED_12 = 0xc, + IMG_NUM_FORMAT_RESERVED_13 = 0xd, + IMG_NUM_FORMAT_RESERVED_14 = 0xe, + IMG_NUM_FORMAT_RESERVED_15 = 0xf, +} IMG_NUM_FORMAT; +typedef enum TileType { + ARRAY_COLOR_TILE = 0x0, + ARRAY_DEPTH_TILE = 0x1, +} TileType; +typedef enum NonDispTilingOrder { + ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, + ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, +} NonDispTilingOrder; +typedef enum MicroTileMode { + ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, + ADDR_SURF_THIN_MICRO_TILING = 0x1, + ADDR_SURF_DEPTH_MICRO_TILING = 0x2, + ADDR_SURF_ROTATED_MICRO_TILING = 0x3, + ADDR_SURF_THICK_MICRO_TILING = 0x4, +} MicroTileMode; +typedef enum TileSplit { + ADDR_SURF_TILE_SPLIT_64B = 0x0, + ADDR_SURF_TILE_SPLIT_128B = 0x1, + ADDR_SURF_TILE_SPLIT_256B = 0x2, + ADDR_SURF_TILE_SPLIT_512B = 0x3, + ADDR_SURF_TILE_SPLIT_1KB = 0x4, + ADDR_SURF_TILE_SPLIT_2KB = 0x5, + ADDR_SURF_TILE_SPLIT_4KB = 0x6, +} TileSplit; +typedef enum SampleSplit { + ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, + ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, + ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, + ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, +} SampleSplit; +typedef enum PipeConfig { + ADDR_SURF_P2 = 0x0, + ADDR_SURF_P2_RESERVED0 = 0x1, + ADDR_SURF_P2_RESERVED1 = 0x2, + ADDR_SURF_P2_RESERVED2 = 0x3, + ADDR_SURF_P4_8x16 = 0x4, + ADDR_SURF_P4_16x16 = 0x5, + ADDR_SURF_P4_16x32 = 0x6, + ADDR_SURF_P4_32x32 = 0x7, + ADDR_SURF_P8_16x16_8x16 = 0x8, + ADDR_SURF_P8_16x32_8x16 = 0x9, + ADDR_SURF_P8_32x32_8x16 = 0xa, + ADDR_SURF_P8_16x32_16x16 = 0xb, + ADDR_SURF_P8_32x32_16x16 = 0xc, + ADDR_SURF_P8_32x32_16x32 = 0xd, + ADDR_SURF_P8_32x64_32x32 = 0xe, + ADDR_SURF_P8_RESERVED0 = 0xf, + 
ADDR_SURF_P16_32x32_8x16 = 0x10, + ADDR_SURF_P16_32x32_16x16 = 0x11, +} PipeConfig; +typedef enum NumBanks { + ADDR_SURF_2_BANK = 0x0, + ADDR_SURF_4_BANK = 0x1, + ADDR_SURF_8_BANK = 0x2, + ADDR_SURF_16_BANK = 0x3, +} NumBanks; +typedef enum BankWidth { + ADDR_SURF_BANK_WIDTH_1 = 0x0, + ADDR_SURF_BANK_WIDTH_2 = 0x1, + ADDR_SURF_BANK_WIDTH_4 = 0x2, + ADDR_SURF_BANK_WIDTH_8 = 0x3, +} BankWidth; +typedef enum BankHeight { + ADDR_SURF_BANK_HEIGHT_1 = 0x0, + ADDR_SURF_BANK_HEIGHT_2 = 0x1, + ADDR_SURF_BANK_HEIGHT_4 = 0x2, + ADDR_SURF_BANK_HEIGHT_8 = 0x3, +} BankHeight; +typedef enum BankWidthHeight { + ADDR_SURF_BANK_WH_1 = 0x0, + ADDR_SURF_BANK_WH_2 = 0x1, + ADDR_SURF_BANK_WH_4 = 0x2, + ADDR_SURF_BANK_WH_8 = 0x3, +} BankWidthHeight; +typedef enum MacroTileAspect { + ADDR_SURF_MACRO_ASPECT_1 = 0x0, + ADDR_SURF_MACRO_ASPECT_2 = 0x1, + ADDR_SURF_MACRO_ASPECT_4 = 0x2, + ADDR_SURF_MACRO_ASPECT_8 = 0x3, +} MacroTileAspect; +typedef enum GATCL1RequestType { + GATCL1_TYPE_NORMAL = 0x0, + GATCL1_TYPE_SHOOTDOWN = 0x1, + GATCL1_TYPE_BYPASS = 0x2, +} GATCL1RequestType; +typedef enum TCC_CACHE_POLICIES { + TCC_CACHE_POLICY_LRU = 0x0, + TCC_CACHE_POLICY_STREAM = 0x1, +} TCC_CACHE_POLICIES; +typedef enum MTYPE { + MTYPE_NC_NV = 0x0, + MTYPE_NC = 0x1, + MTYPE_CC = 0x2, + MTYPE_UC = 0x3, +} MTYPE; +typedef enum PERFMON_COUNTER_MODE { + PERFMON_COUNTER_MODE_ACCUM = 0x0, + PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1, + PERFMON_COUNTER_MODE_MAX = 0x2, + PERFMON_COUNTER_MODE_DIRTY = 0x3, + PERFMON_COUNTER_MODE_SAMPLE = 0x4, + PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5, + PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6, + PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7, + PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8, + PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9, + PERFMON_COUNTER_MODE_RESERVED = 0xf, +} PERFMON_COUNTER_MODE; +typedef enum PERFMON_SPM_MODE { + PERFMON_SPM_MODE_OFF = 0x0, + PERFMON_SPM_MODE_16BIT_CLAMP = 0x1, + PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2, + PERFMON_SPM_MODE_32BIT_CLAMP = 0x3, + PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4, + PERFMON_SPM_MODE_RESERVED_5 = 0x5, + PERFMON_SPM_MODE_RESERVED_6 = 0x6, + PERFMON_SPM_MODE_RESERVED_7 = 0x7, + PERFMON_SPM_MODE_TEST_MODE_0 = 0x8, + PERFMON_SPM_MODE_TEST_MODE_1 = 0x9, + PERFMON_SPM_MODE_TEST_MODE_2 = 0xa, +} PERFMON_SPM_MODE; +typedef enum SurfaceTiling { + ARRAY_LINEAR = 0x0, + ARRAY_TILED = 0x1, +} SurfaceTiling; +typedef enum SurfaceArray { + ARRAY_1D = 0x0, + ARRAY_2D = 0x1, + ARRAY_3D = 0x2, + ARRAY_3D_SLICE = 0x3, +} SurfaceArray; +typedef enum ColorArray { + ARRAY_2D_ALT_COLOR = 0x0, + ARRAY_2D_COLOR = 0x1, + ARRAY_3D_SLICE_COLOR = 0x3, +} ColorArray; +typedef enum DepthArray { + ARRAY_2D_ALT_DEPTH = 0x0, + ARRAY_2D_DEPTH = 0x1, +} DepthArray; +typedef enum ENUM_NUM_SIMD_PER_CU { + NUM_SIMD_PER_CU = 0x4, +} ENUM_NUM_SIMD_PER_CU; +typedef enum MEM_PWR_FORCE_CTRL { + NO_FORCE_REQUEST = 0x0, + FORCE_LIGHT_SLEEP_REQUEST = 0x1, + FORCE_DEEP_SLEEP_REQUEST = 0x2, + FORCE_SHUT_DOWN_REQUEST = 0x3, +} MEM_PWR_FORCE_CTRL; +typedef enum MEM_PWR_FORCE_CTRL2 { + NO_FORCE_REQ = 0x0, + FORCE_LIGHT_SLEEP_REQ = 0x1, +} MEM_PWR_FORCE_CTRL2; +typedef enum MEM_PWR_DIS_CTRL { + ENABLE_MEM_PWR_CTRL = 0x0, + DISABLE_MEM_PWR_CTRL = 0x1, +} MEM_PWR_DIS_CTRL; +typedef enum MEM_PWR_SEL_CTRL { + DYNAMIC_SHUT_DOWN_ENABLE = 0x0, + DYNAMIC_DEEP_SLEEP_ENABLE = 0x1, + DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2, +} MEM_PWR_SEL_CTRL; +typedef enum MEM_PWR_SEL_CTRL2 { + DYNAMIC_DEEP_SLEEP_EN = 0x0, + DYNAMIC_LIGHT_SLEEP_EN = 0x1, +} MEM_PWR_SEL_CTRL2; + +#endif /* SMU_7_1_3_ENUM_H */ diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h new file mode 100644 index 000000000..1ede9e274 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -0,0 +1,6080 @@ +/* + * SMU_7_1_3 Register documentation + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef SMU_7_1_3_SH_MASK_H +#define SMU_7_1_3_SH_MASK_H + +#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff +#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 +#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff +#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 +#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f +#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0 +#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780 +#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7 +#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800 +#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb +#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000 +#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd +#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000 +#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10 +#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f +#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1 +#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0 +#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f +#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1 +#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0 +#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f +#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0 
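Each field in this header comes as a *_MASK / *__SHIFT pair: the mask selects the field's bits within the 32-bit register and the shift gives its bit position, so a field is read by masking and shifting right, and written by clearing the masked bits and OR-ing in the shifted value. A minimal sketch of such accessors follows, modeled on the REG_GET_FIELD()/REG_SET_FIELD() macros the amdgpu driver builds on this exact naming convention; the usage line at the end is illustrative only and not part of this patch:

/*
 * Illustrative sketch, not part of this patch: generic field accessors
 * over the reg##__##field##_MASK / reg##__##field##__SHIFT naming used
 * throughout smu_7_1_3_sh_mask.h, in the style of amdgpu's
 * REG_GET_FIELD()/REG_SET_FIELD().
 */
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

/* Extract one field from a register value read back from hardware. */
#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >>			\
	 REG_FIELD_SHIFT(reg, field))

/* Replace one field in a register value, leaving the other bits intact. */
#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) &					\
	  ((field_val) << REG_FIELD_SHIFT(reg, field))))

/* Example: pull the ECLK divider out of a CG_ECLK_CNTL read:
 *	u32 div = REG_GET_FIELD(eclk_cntl, CG_ECLK_CNTL, ECLK_DIVIDER);
 */

Note also that the ix-prefixed offsets in the companion smu_7_1_3_d.h header are indirect SMC registers rather than directly mapped ones: the driver writes the ix offset to an SMC index register and transfers the value through the matching data register, which is the access window the GCK_SMC_IND_INDEX/GCK_SMC_IND_DATA fields at the top of this file describe.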
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1 +#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0 +#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f +#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f +#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00 +#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa +#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1 +#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0 +#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2 +#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0 +#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2 +#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1 +#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4 +#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2 +#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8 +#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3 +#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10 +#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4 +#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20 +#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5 +#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40 +#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6 +#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80 +#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7 +#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100 +#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8 +#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200 +#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9 +#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400 +#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa +#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800 +#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb +#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000 +#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc +#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000 +#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd +#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1 +#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2 +#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10 +#define 
CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4 +#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0 +#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000 +#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b +#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb +#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17 +#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a +#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b +#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000 +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9 +#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000 +#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc +#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000 +#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15 +#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17 +#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c +#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000 +#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f +#define 
CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1 +#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2 +#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1 +#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc +#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2 +#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30 +#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100 +#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8 +#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200 +#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9 +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0 +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00 +#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000 +#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15 +#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000 +#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 +#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff +#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 +#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 +#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 +#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1 +#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4 +#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2 +#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8 +#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3 +#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10 +#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4 +#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00 +#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa +#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000 +#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc +#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000 +#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c +#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000 +#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d +#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1 +#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0 +#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0 +#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4 +#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff +#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0 +#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00 +#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8 +#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2 +#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1 +#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4 +#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2 +#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1 +#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0 +#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8 +#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3 +#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100 +#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8 +#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000 +#define 
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18 +#define 
MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000 +#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18 +#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff +#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10 +#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000 +#define 
MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18 +#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff +#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0 +#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff +#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0 +#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff +#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0 +#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff +#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0 +#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff +#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0 +#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff +#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0 +#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff +#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0 +#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff +#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0 +#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff +#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0 +#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff +#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0 +#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff +#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0 +#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff +#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0 +#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff +#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0 +#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff +#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0 +#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff +#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0 +#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff +#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0 +#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff +#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0 +#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff +#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0 +#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff +#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0 +#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff +#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0 +#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff +#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0 +#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff +#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0 +#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff +#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0 +#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff +#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0 +#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff +#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0 +#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff +#define 
DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0 +#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff +#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0 +#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff +#define DPM_TABLE_28__SystemFlags__SHIFT 0x0 +#define DPM_TABLE_29__VRConfig_MASK 0xffffffff +#define DPM_TABLE_29__VRConfig__SHIFT 0x0 +#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff +#define DPM_TABLE_30__SmioMask1__SHIFT 0x0 +#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff +#define DPM_TABLE_31__SmioMask2__SHIFT 0x0 +#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff +#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0 +#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00 +#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8 +#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000 +#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10 +#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff +#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000 +#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10 +#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff +#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000 +#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10 +#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff +#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000 +#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10 +#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff +#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000 +#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10 +#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff +#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000 +#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10 +#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff +#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000 +#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10 +#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff +#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8 +#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000 +#define 
DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10 +#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff +#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0 +#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff +#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0 +#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff +#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0 +#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff +#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0 +#define DPM_TABLE_44__VddcTable_1_MASK 0xffff +#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0 +#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000 +#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10 +#define DPM_TABLE_45__VddcTable_3_MASK 0xffff +#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0 +#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000 +#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10 +#define DPM_TABLE_46__VddcTable_5_MASK 0xffff +#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0 +#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000 +#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10 +#define DPM_TABLE_47__VddcTable_7_MASK 0xffff +#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0 +#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000 +#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10 +#define DPM_TABLE_48__VddcTable_9_MASK 0xffff +#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0 +#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000 +#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10 +#define DPM_TABLE_49__VddcTable_11_MASK 0xffff +#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0 +#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000 +#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10 +#define DPM_TABLE_50__VddcTable_13_MASK 0xffff +#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0 +#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000 +#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10 +#define DPM_TABLE_51__VddcTable_15_MASK 0xffff +#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0 +#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000 +#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10 +#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff +#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0 +#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000 +#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10 +#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff +#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0 +#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000 +#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10 +#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff +#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0 +#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000 +#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10 +#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff +#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0 +#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000 +#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10 +#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff +#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0 +#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000 +#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10 +#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff +#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0 +#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000 +#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10 +#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff +#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0 +#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000 +#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10 +#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff +#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0 +#define 
DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000 +#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10 +#define DPM_TABLE_60__VddciTable_1_MASK 0xffff +#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0 +#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000 +#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10 +#define DPM_TABLE_61__VddciTable_3_MASK 0xffff +#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0 +#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000 +#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10 +#define DPM_TABLE_62__VddciTable_5_MASK 0xffff +#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0 +#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000 +#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10 +#define DPM_TABLE_63__VddciTable_7_MASK 0xffff +#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0 +#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000 +#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000 +#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000 +#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000 +#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000 +#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000 +#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10 +#define 
DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000 +#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000 +#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000 +#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000 +#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000 +#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000 +#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000 +#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18 +#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff +#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0 +#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00 +#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8 +#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000 +#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10 +#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000 +#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18 +#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff +#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0 +#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00 +#define 
DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8 +#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000 +#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10 +#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000 +#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18 +#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff +#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0 +#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00 +#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8 +#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000 +#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10 +#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000 +#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18 +#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff +#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0 +#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00 +#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8 +#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000 +#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10 +#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000 +#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18 +#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff +#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0 +#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00 +#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8 +#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000 +#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10 +#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000 +#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18 +#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff +#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0 +#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00 +#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8 +#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000 +#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10 +#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000 +#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18 +#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff +#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0 +#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00 +#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8 +#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000 +#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10 +#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000 +#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18 +#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff +#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0 +#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00 +#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8 +#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000 +#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10 +#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000 +#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff +#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000 +#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff +#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 
0xff00 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000 +#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff +#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000 +#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff +#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000 +#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18 +#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff +#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0 +#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00 +#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8 +#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000 +#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10 +#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000 +#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18 +#define DPM_TABLE_89__SamuLevelCount_MASK 0xff +#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0 +#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00 +#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8 +#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000 +#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10 +#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000 +#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18 +#define DPM_TABLE_90__Reserved_0_MASK 0xff +#define DPM_TABLE_90__Reserved_0__SHIFT 0x0 +#define DPM_TABLE_90__ThermOutMode_MASK 0xff00 +#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8 +#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000 +#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10 +#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000 +#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18 +#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff +#define DPM_TABLE_91__Reserved_0__SHIFT 0x0 +#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff +#define DPM_TABLE_92__Reserved_1__SHIFT 0x0 +#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff +#define DPM_TABLE_93__Reserved_2__SHIFT 0x0 +#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff +#define DPM_TABLE_94__Reserved_3__SHIFT 0x0 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 
0xffff +#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000 +#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18 +#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff +#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000 +#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10 +#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000 +#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff +#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff +#define 
DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000 +#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18 +#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff +#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000 +#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10 +#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000 +#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff +#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff +#define 
DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000 +#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18 +#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff +#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000 +#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10 +#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000 +#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff +#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000 +#define 
DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18 +#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff +#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000 +#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10 +#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000 +#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff +#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000 +#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18 +#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff +#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000 +#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10 +#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000 +#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18 +#define 
DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff +#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000 +#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18 +#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff +#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000 +#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10 +#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000 +#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define 
DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff +#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18 +#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000 +#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18 +#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff +#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000 +#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10 +#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000 +#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff +#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0 +#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000 +#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10 +#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000 +#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18 +#define 
DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff +#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000 +#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18 +#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff +#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0 +#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000 +#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10 +#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000 +#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff +#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000 +#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10 +#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff +#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0 +#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000 +#define 
DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10 +#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000 +#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18 +#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff +#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff +#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000 +#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10 +#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff +#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0 +#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000 +#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10 +#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000 +#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18 +#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff +#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0 +#define 
DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff +#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000 +#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10 +#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff +#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0 +#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000 +#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10 +#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000 +#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18 +#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff +#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff +#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000 +#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10 +#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff +#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0 +#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000 +#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10 +#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000 +#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18 +#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff +#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000 +#define 
DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff +#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0 +#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff +#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0 +#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff +#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0 +#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00 +#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8 +#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000 +#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10 +#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000 +#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18 +#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff +#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0 +#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00 +#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8 +#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000 +#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10 +#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000 +#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18 +#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff +#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0 +#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00 +#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8 +#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000 +#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10 +#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff +#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0 +#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff +#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0 +#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff +#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0 +#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000 +#define 
DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff +#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0 +#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff +#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0 +#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff +#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0 +#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff +#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0 +#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff +#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0 +#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff +#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0 +#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff +#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0 +#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff +#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0 +#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff +#define 
DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff +#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0 +#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff +#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0 +#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff +#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0 +#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff +#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0 +#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00 +#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8 +#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000 +#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10 +#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000 +#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18 +#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff +#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0 +#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff +#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0 +#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff +#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0 +#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff +#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff +#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0 +#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff +#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0 +#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00 +#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8 +#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000 +#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10 +#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000 +#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18 +#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff +#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0 +#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff +#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0 +#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff +#define 
DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0 +#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff +#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0 +#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff +#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0 +#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff +#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0 +#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff +#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0 +#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00 +#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8 +#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff +#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0 +#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00 +#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8 +#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define 
DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff +#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0 +#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00 +#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8 +#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff +#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0 +#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00 +#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8 +#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff +#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0 +#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00 +#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8 +#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff +#define 
DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff +#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0 +#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00 +#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8 +#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff +#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0 +#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00 +#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8 +#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff +#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0 +#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff +#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff +#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0 +#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00 +#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8 +#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000 +#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10 +#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000 +#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18 +#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 
0xffffffff +#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000 +#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff +#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8 +#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define 
DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff +#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000 +#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define 
DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0 +#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff +#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000 +#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff +#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8 +#define 
DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff +#define 
DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000 +#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0 +#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff +#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00 +#define 
DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff +#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0 +#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00 +#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8 +#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000 +#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10 +#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000 +#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18 +#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff +#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff +#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0 +#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00 +#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8 +#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000 +#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10 +#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000 +#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18 +#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff +#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff +#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0 +#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00 +#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8 +#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000 +#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10 +#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000 +#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18 +#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff +#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10 +#define 
DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff +#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0 +#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00 +#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8 +#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000 +#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10 +#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000 +#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18 +#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff +#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff +#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0 +#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00 +#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8 +#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000 +#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10 +#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000 +#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18 +#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff +#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff +#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0 +#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00 +#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8 +#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000 +#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10 +#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000 +#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18 +#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff +#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff +#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0 
+#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00 +#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8 +#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000 +#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10 +#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000 +#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18 +#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff +#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000 +#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18 +#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff +#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0 +#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00 +#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8 +#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000 +#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10 +#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000 +#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18 +#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff +#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0 +#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff +#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0 +#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff +#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0 +#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00 +#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8 +#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000 +#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10 +#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff +#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0 +#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff +#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0 +#define DPM_TABLE_366__Smio_0_MASK 0xffffffff +#define DPM_TABLE_366__Smio_0__SHIFT 0x0 +#define DPM_TABLE_367__Smio_1_MASK 0xffffffff +#define DPM_TABLE_367__Smio_1__SHIFT 0x0 +#define DPM_TABLE_368__Smio_2_MASK 0xffffffff +#define DPM_TABLE_368__Smio_2__SHIFT 0x0 +#define DPM_TABLE_369__Smio_3_MASK 0xffffffff +#define DPM_TABLE_369__Smio_3__SHIFT 0x0 +#define DPM_TABLE_370__Smio_4_MASK 0xffffffff +#define DPM_TABLE_370__Smio_4__SHIFT 0x0 +#define DPM_TABLE_371__Smio_5_MASK 0xffffffff +#define DPM_TABLE_371__Smio_5__SHIFT 0x0 +#define DPM_TABLE_372__Smio_6_MASK 0xffffffff +#define DPM_TABLE_372__Smio_6__SHIFT 0x0 +#define DPM_TABLE_373__Smio_7_MASK 0xffffffff +#define DPM_TABLE_373__Smio_7__SHIFT 0x0 +#define DPM_TABLE_374__Smio_8_MASK 0xffffffff +#define DPM_TABLE_374__Smio_8__SHIFT 0x0 +#define DPM_TABLE_375__Smio_9_MASK 0xffffffff +#define DPM_TABLE_375__Smio_9__SHIFT 0x0 +#define DPM_TABLE_376__Smio_10_MASK 0xffffffff +#define DPM_TABLE_376__Smio_10__SHIFT 0x0 +#define DPM_TABLE_377__Smio_11_MASK 0xffffffff +#define DPM_TABLE_377__Smio_11__SHIFT 0x0 +#define DPM_TABLE_378__Smio_12_MASK 0xffffffff +#define DPM_TABLE_378__Smio_12__SHIFT 0x0 +#define DPM_TABLE_379__Smio_13_MASK 0xffffffff +#define DPM_TABLE_379__Smio_13__SHIFT 0x0 +#define DPM_TABLE_380__Smio_14_MASK 0xffffffff +#define DPM_TABLE_380__Smio_14__SHIFT 0x0 +#define 
DPM_TABLE_381__Smio_15_MASK 0xffffffff
+#define DPM_TABLE_381__Smio_15__SHIFT 0x0
+#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
+#define DPM_TABLE_382__Smio_16__SHIFT 0x0
+#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
+#define DPM_TABLE_383__Smio_17__SHIFT 0x0
+#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
+#define DPM_TABLE_384__Smio_18__SHIFT 0x0
+#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
+#define DPM_TABLE_385__Smio_19__SHIFT 0x0
+#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
+#define DPM_TABLE_386__Smio_20__SHIFT 0x0
+#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
+#define DPM_TABLE_387__Smio_21__SHIFT 0x0
+#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
+#define DPM_TABLE_388__Smio_22__SHIFT 0x0
+#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
+#define DPM_TABLE_389__Smio_23__SHIFT 0x0
+#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
+#define DPM_TABLE_390__Smio_24__SHIFT 0x0
+#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
+#define DPM_TABLE_391__Smio_25__SHIFT 0x0
+#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
+#define DPM_TABLE_392__Smio_26__SHIFT 0x0
+#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
+#define DPM_TABLE_393__Smio_27__SHIFT 0x0
+#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
+#define DPM_TABLE_394__Smio_28__SHIFT 0x0
+#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
+#define DPM_TABLE_395__Smio_29__SHIFT 0x0
+#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
+#define DPM_TABLE_396__Smio_30__SHIFT 0x0
+#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
+#define DPM_TABLE_397__Smio_31__SHIFT 0x0
+#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
+#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
+#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
+#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
+#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
+#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
+#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
+#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
+#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
+#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
+#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
+#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
+#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
+#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
+#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
+#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
+#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
+#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
+#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
+#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
+#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
+#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
+#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
+#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
+#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
+#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
+#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
+#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
+#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
+#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
+#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
+#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
+#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
+#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
+#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
+#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
+#define DPM_TABLE_404__DTEMode_MASK 0xff
+#define DPM_TABLE_404__DTEMode__SHIFT 0x0
+#define DPM_TABLE_404__DTEInterval_MASK 0xff00
+#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
+#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
+#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
+#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
+#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
+#define DPM_TABLE_405__ThermGpio_MASK 0xff
+#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
+#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
+#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
+#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
+#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
+#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
+#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
+#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
+#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
+#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
+#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
+#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
+#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
+#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
+#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
+#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
+#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
+#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
+#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
+#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
+#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
+#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
+#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
+#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
+#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
+#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
+#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
+#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
+#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
+#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
+#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
+#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
+#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
+#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
+#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
+#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
+#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
+#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
+#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
+#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
+#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
+#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
+#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
+#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
+#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
+#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
+#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
+#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
+#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
+#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
+#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
+#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
+#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
+#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
+#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
+#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
+#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
+#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
+#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
+#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL1__M_MASK 0xff0000
+#define CG_FDO_CTRL1__M__SHIFT 0x10
+#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
+#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
+#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
+#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
+#define CG_FDO_CTRL2__TMIN_MASK 0xff
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
+#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
+#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
+#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
+#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
+#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
+#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
+#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
+#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
+#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
+#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
+#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
+#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
+#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
+#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
+#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
+#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
+#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
+#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
+#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
+#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
+#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
+#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
+#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0 +#define
THM_TMON2_RDIR4_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff +#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0 +#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800 +#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb +#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_INT_DATA__Z_MASK 0x7ff +#define THM_TMON0_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON0_INT_DATA__VALID_MASK 0x800 +#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000 +#define 
THM_TMON0_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON1_INT_DATA__Z_MASK 0x7ff +#define THM_TMON1_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON1_INT_DATA__VALID_MASK 0x800 +#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000 +#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON2_INT_DATA__Z_MASK 0x7ff +#define THM_TMON2_INT_DATA__Z__SHIFT 0x0 +#define THM_TMON2_INT_DATA__VALID_MASK 0x800 +#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb +#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000 +#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc +#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f +#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0 +#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0 +#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5 +#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f +#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0 +#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0 +#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5 +#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f +#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0 +#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0 +#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5 +#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f +#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0 +#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20 +#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5 +#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f +#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0 +#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20 +#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5 +#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f +#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0 +#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20 +#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5 +#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1 +#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0 +#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2 +#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3 +#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40 +#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6 +#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100 +#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8 +#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200 +#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9 +#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400 +#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa +#define GENERAL_PWRMGT__SPARE11_MASK 0x800 +#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb +#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000 +#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe +#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000 +#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf +#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000 +#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10 +#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000 +#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11 +#define GENERAL_PWRMGT__SPARE18_MASK 0x40000 +#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12 +#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000 +#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13 +#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000 +#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17 +#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000 +#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b +#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000 +#define 
GENERAL_PWRMGT__SPARE__SHIFT 0x1c +#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3 +#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0 +#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4 +#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2 +#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8 +#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3 +#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10 +#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4 +#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0 +#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5 +#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1 +#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0 +#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10 +#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4 +#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20 +#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5 +#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000 +#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe +#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000 +#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf +#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000 +#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10 +#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000 +#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d +#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1 +#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0 +#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff +#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define 
CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define 
CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define 
CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define 
CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define 
CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define 
CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define 
CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define 
CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define 
CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1 +#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0 +#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2 +#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1 +#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4 +#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2 +#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8 +#define 
CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3 +#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10 +#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4 +#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20 +#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5 +#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40 +#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6 +#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80 +#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7 +#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100 +#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8 +#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200 +#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9 +#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400 +#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa +#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800 +#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb +#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000 +#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc +#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000 +#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd +#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe +#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf +#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10 +#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11 +#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12 +#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13 +#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14 +#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15 +#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16 +#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17 +#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18 +#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19 +#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a +#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000 
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b +#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c +#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000 +#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d +#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000 +#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e +#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf +#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0 +#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0 +#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4 +#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00 +#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8 +#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000 +#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf +#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000 +#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000 +#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000 +#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c +#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff +#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0 +#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f +#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0 +#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80 +#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 +#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 +#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 +#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000 +#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11 +#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000 +#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12 +#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000 +#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13 +#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000 +#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14 +#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000 +#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15 +#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000 +#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16 +#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000 +#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17 +#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000 +#define 
SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000 +#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19 +#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000 +#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a +#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000 +#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b +#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000 +#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c +#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000 +#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d +#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000 +#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e +#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 +#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f +#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1 +#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2 +#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1 +#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4 +#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2 +#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10 +#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4 +#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40 +#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6 +#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80 +#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100 +#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8 +#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200 +#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9 +#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400 +#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa +#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800 +#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000 +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000 +#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd +#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000 +#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe +#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000 +#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15 +#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000 +#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4 +#define 
SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000 +#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38 +#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000 +#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14 +#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7 +#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0 +#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8 +#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3 +#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0 +#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4 +#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000 +#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10 +#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000 +#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f +#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1 +#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0 +#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2 +#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3 +#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10 +#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4 +#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20 +#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7 +#define 
LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200 +#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000 +#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd +#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000 +#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe +#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000 +#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000 +#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13 +#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000 +#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14 +#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000 +#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000 +#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000 +#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c +#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff +#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0 +#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000 +#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10 +#define SCLK_MIN_DIV__FRACV_MASK 0xfff +#define SCLK_MIN_DIV__FRACV__SHIFT 0x0 +#define SCLK_MIN_DIV__INTV_MASK 0x7f000 +#define SCLK_MIN_DIV__INTV__SHIFT 0xc +#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff +#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0 +#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff 
+#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0 +#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100 +#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8 +#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200 +#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9 +#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00 +#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa +#define PWR_AVFS_CNTL__MmState_MASK 0x3f000 +#define PWR_AVFS_CNTL__MmState__SHIFT 0xc +#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000 +#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12 +#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000 +#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13 +#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000 +#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14 +#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000 +#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15 +#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000 +#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16 +#define PWR_AVFS_CNTL__Isolate_MASK 0x800000 +#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17 +#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000 +#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18 +#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000 +#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19 +#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000 +#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a +#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define 
PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define 
PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff +#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0 +#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100 +#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8 +#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200 +#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9 +#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1 +#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0 +#define PWR_CKS_ENABLE__masterReset_MASK 0x2 +#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1 +#define PWR_CKS_ENABLE__staticEnable_MASK 0x4 +#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2 +#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8 +#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3 +#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10 +#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4 +#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60 +#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5 +#define 
PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80 +#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7 +#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1 +#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0 +#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2 +#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1 +#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4 +#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2 +#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78 +#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3 +#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80 +#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7 +#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00 +#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8 +#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000 +#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc +#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000 +#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10 +#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000 +#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11 +#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000 +#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12 +#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000 +#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16 +#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000 +#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 
0x20000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff +#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000 +#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2 +#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1 +#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4 +#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2 +#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8 +#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3 +#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1 +#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0 +#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1 +#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0 +#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe +#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1 +#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff +#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0 +#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff +#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0 +#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1 +#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0 +#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe +#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1 +#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff +#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0 +#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff +#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0 +#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1 +#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0 +#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe +#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1 +#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff +#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0 +#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff +#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0 +#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1 
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0 +#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe +#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1 +#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff +#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0 +#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff +#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0 +#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1 +#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0 +#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe +#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1 +#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff +#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0 +#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff +#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0 +#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1 +#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0 +#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe +#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1 +#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff +#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0 +#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff +#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0 +#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1 +#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0 +#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe +#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1 +#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff +#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0 +#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff +#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0 +#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1 +#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0 +#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe +#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1 +#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000 +#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11 +#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16 +#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff +#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0 +#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff +#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0 +#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1 +#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0 +#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe +#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1 +#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000 +#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11 +#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000 +#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16 +#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff +#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0 +#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff +#define 
LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0 +#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff +#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0 +#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff +#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0 +#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2 +#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1 +#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4 +#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000 +#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10 +#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000 +#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18 +#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000 +#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a +#define ROM_STATUS__ROM_BUSY_MASK 0x1 +#define ROM_STATUS__ROM_BUSY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf +#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f +#define ROM_INDEX__ROM_INDEX_MASK 0xffffff +#define ROM_INDEX__ROM_INDEX__SHIFT 0x0 +#define ROM_DATA__ROM_DATA_MASK 0xffffffff +#define ROM_DATA__ROM_DATA__SHIFT 0x0 +#define ROM_START__ROM_START_MASK 0xffffff +#define ROM_START__ROM_START__SHIFT 0x0 +#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff +#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0 +#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000 +#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12 +#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1 +#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8 +#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0 
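
Every field in this header is described by a *_MASK/*__SHIFT pair under the REGISTER__FIELD naming convention, so a driver can read or update any field generically. A minimal sketch of that pattern, using the SCLK_DEEP_SLEEP_CNTL fields defined earlier in this header (illustrative only, not part of the patch: the FIELD_GET/FIELD_SET helpers below are stand-ins; the amdgpu driver builds comparable REG_GET_FIELD/REG_SET_FIELD macros on top of the same names):

#include <stdint.h>

/* Paste REG__FIELD_MASK and REG__FIELD__SHIFT from the definitions above. */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: given the current SCLK_DEEP_SLEEP_CNTL value (e.g. from an
 * MMIO read), inspect the hysteresis field and turn deep sleep on. */
static uint32_t sclk_ds_enable(uint32_t v)
{
	/* 12-bit hysteresis at bits 15:4, per the masks above */
	uint32_t hyst = FIELD_GET(v, SCLK_DEEP_SLEEP_CNTL, HYSTERESIS);
	(void)hyst; /* shown for illustration only */
	return FIELD_SET(v, SCLK_DEEP_SLEEP_CNTL, ENABLE_DS, 1);
}
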
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0 +#define 
ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff +#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf +#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0 +#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000 +#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f +#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf +#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0 +#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000 +#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f +#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff +#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0 +#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff +#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10 +#define 
GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000 +#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10 +#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff +#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0 +#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff +#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 +#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 +#define 
GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+
+#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
new file mode 100644
index 000000000..e8fae5c77
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+	return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+	return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+	return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
new file mode 100644
index 000000000..6f907a5ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/atom-names.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
new file mode 100644
index 000000000..1125b866c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/atom-types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
new file mode 100644
index 000000000..44c5d4a4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -0,0 +1,8555 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/****************************************************************************/
+/*Portion I: Definitions shared between VBIOS and Driver */
+/****************************************************************************/
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR 0x00020000
+#define ATOM_VERSION_MINOR 0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness should be specified before inclusion,
+ * default to little endian
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+  #ifndef ULONG
+    typedef unsigned long ULONG;
+  #endif
+
+  #ifndef UCHAR
+    typedef unsigned char UCHAR;
+  #endif
+
+  #ifndef USHORT
+    typedef unsigned short USHORT;
+  #endif
+#endif
+
+#define ATOM_DAC_A 0
+#define ATOM_DAC_B 1
+#define ATOM_EXT_DAC 2
+
+#define ATOM_CRTC1 0
+#define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+
+#define ATOM_UNDERLAY_PIPE0 16
+#define ATOM_UNDERLAY_PIPE1 17
+
+#define ATOM_CRTC_INVALID 0xFF
+
+#define ATOM_DIGA 0
+#define ATOM_DIGB 1
+
+#define ATOM_PPLL1 0
+#define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL0 2
+#define ATOM_PPLL3 3
+
+#define ATOM_EXT_PLL1 8
+#define ATOM_EXT_PLL2 9
+#define ATOM_EXT_CLOCK 10
+#define ATOM_PPLL_INVALID 0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL 0
+#define ENCODER_REFCLK_SRC_P2PLL 1
+#define ENCODER_REFCLK_SRC_DCPLL 2
+#define ENCODER_REFCLK_SRC_EXTCLK 3
+#define ENCODER_REFCLK_SRC_INVALID 0xFF
+
+#define ATOM_SCALER_DISABLE 0 //For Fudo, it's bypass and auto-center & no replication
+#define ATOM_SCALER_CENTER 1 //For Fudo, it's bypass and auto-center & auto replication
+#define ATOM_SCALER_EXPANSION 2 //For Fudo, it's 2 Tap alpha blending mode
+#define ATOM_SCALER_MULTI_EX 3 //For Fudo only, it's multi-tap mode only used to drive TV or CV, only used by Bios
+
+#define ATOM_DISABLE 0
+#define ATOM_ENABLE 1
+#define ATOM_LCD_BLOFF (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING 1
+#define ATOM_BLANKING_OFF 0
+
+
+#define ATOM_CRT1 0
+#define ATOM_CRT2 1
+
+#define ATOM_TV_NTSC 1
+#define ATOM_TV_NTSCJ 2
+#define ATOM_TV_PAL 3
+#define ATOM_TV_PALM 4
+#define ATOM_TV_PALCN 5
+#define ATOM_TV_PALN 6
+#define ATOM_TV_PAL60 7
+#define ATOM_TV_SECAM 8
+#define ATOM_TV_CV 16
+
+#define ATOM_DAC1_PS2 1
+#define ATOM_DAC1_CV 2
+#define ATOM_DAC1_NTSC 3
+#define ATOM_DAC1_PAL 4
+
+#define ATOM_DAC2_PS2 ATOM_DAC1_PS2
+#define ATOM_DAC2_CV ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL ATOM_DAC1_PAL
+
+#define ATOM_PM_ON 0
+#define ATOM_PM_STANDBY 1
+#define ATOM_PM_SUSPEND 2
+#define ATOM_PM_OFF 3
+
+// For ATOM_LVDS_INFO_V12
+// Bit0:{=0:single, =1:dual},
+// Bit1:{=0:666RGB, =1:888RGB},
+// Bit2:3:{Grey level}
+// Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+#define ATOM_PANEL_MISC_DUAL 0x00000001
+#define ATOM_PANEL_MISC_888RGB 0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_FPDI 0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_SPATIAL 0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
+#define MEMTYPE_DDR1 "DDR1"
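
The ATOM_PANEL_MISC_* bits above encode the LVDS link layout described in the comment block. A small illustrative decoder, assuming the misc word has been read out of an ATOM_LVDS_INFO_V12 record (not part of the patch; decode_panel_misc() is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static void decode_panel_misc(uint32_t misc)
{
	int dual   = !!(misc & ATOM_PANEL_MISC_DUAL);   /* Bit0: 0=single, 1=dual */
	int rgb888 = !!(misc & ATOM_PANEL_MISC_888RGB); /* Bit1: 0=666RGB, 1=888RGB */
	int grey   = (misc & ATOM_PANEL_MISC_GREY_LEVEL) >>
		     ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;  /* Bits2:3: grey level */
	int fpdi   = !!(misc & ATOM_PANEL_MISC_FPDI);   /* Bit4: 0=LDI, 1=FPDI (RGB888 only) */

	printf("LVDS: %s link, %s, grey level %d, %s format\n",
	       dual ? "dual" : "single", rgb888 ? "888RGB" : "666RGB",
	       grey, fpdi ? "FPDI" : "LDI");
}
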
+#define MEMTYPE_DDR2 "DDR2" +#define MEMTYPE_DDR3 "DDR3" +#define MEMTYPE_DDR4 "DDR4" + +#define ASIC_BUS_TYPE_PCI "PCI" +#define ASIC_BUS_TYPE_AGP "AGP" +#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS" + +//Maximum size of that FireGL flag string +#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support +#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING ) + +#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop +#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING + +#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support +#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING ) + +#define HW_ASSISTED_I2C_STATUS_FAILURE 2 +#define HW_ASSISTED_I2C_STATUS_SUCCESS 1 + +#pragma pack(1) // BIOS data must use byte aligment + +// Define offset to location of ROM header. +#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L +#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L + +#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 +#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 //including the terminator 0x0! +#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f +#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e + +/****************************************************************************/ +// Common header for all tables (Data table, Command table). +// Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. +// And the pointer actually points to this header. +/****************************************************************************/ + +typedef struct _ATOM_COMMON_TABLE_HEADER +{ + USHORT usStructureSize; + UCHAR ucTableFormatRevision; //Change it when the Parser is not backward compatible + UCHAR ucTableContentRevision; //Change it only when the table needs to change but the firmware + //Image can't be updated, while Driver needs to carry the new table! +}ATOM_COMMON_TABLE_HEADER; + +/****************************************************************************/ +// Structure stores the ROM header. 
+/****************************************************************************/
+// Structure storing the ROM header.
+/****************************************************************************/
+typedef struct _ATOM_ROM_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR  uaFirmWareSignature[4];  //Signature to distinguish between Atombios and non-atombios;
+                                  //atombios should init it as "ATOM", don't change the position
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset;
+  USHORT usMasterCommandTableOffset;  //Offset for SW to get all command table offsets, don't change the position
+  USHORT usMasterDataTableOffset;     //Offset for SW to get all data table offsets, don't change the position
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+}ATOM_ROM_HEADER;
+
+//==============================Command Table Portion====================================
+
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+  USHORT ASIC_Init;                    //Function Table, used by various SW components, latest version 1.1
+  USHORT GetDisplaySurfaceSize;        //Atomic Table, used by Bios when enabling HW ICON
+  USHORT ASIC_RegistersInit;           //Atomic Table, indirectly used by various SW components, called from ASIC_Init
+  USHORT VRAM_BlockVenderDetection;    //Atomic Table, used only by Bios
+  USHORT DIGxEncoderControl;           //Only used by Bios
+  USHORT MemoryControllerInit;         //Atomic Table, indirectly used by various SW components, called from ASIC_Init
+  USHORT EnableCRTCMemReq;             //Function Table, directly used by various SW components, latest version 2.1
+  USHORT MemoryParamAdjust;            //Atomic Table, indirectly used by various SW components, called from SetMemoryClock if needed
+  USHORT DVOEncoderControl;            //Function Table, directly used by various SW components, latest version 1.2
+  USHORT GPIOPinControl;               //Atomic Table, only used by Bios
+  USHORT SetEngineClock;               //Function Table, directly used by various SW components, latest version 1.1
+  USHORT SetMemoryClock;               //Function Table, directly used by various SW components, latest version 1.1
+  USHORT SetPixelClock;                //Function Table, directly used by various SW components, latest version 1.2
+  USHORT EnableDispPowerGating;        //Atomic Table, indirectly used by various SW components, called from ASIC_Init
+  USHORT ResetMemoryDLL;               //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT ResetMemoryDevice;            //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT MemoryPLLInit;                //Atomic Table, used only by Bios
+  USHORT AdjustDisplayPll;             //Atomic Table, used by various SW components
+  USHORT AdjustMemoryController;       //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT EnableASIC_StaticPwrMgt;      //Atomic Table, only used by Bios
+  USHORT SetUniphyInstance;            //Atomic Table, only used by Bios
+  USHORT DAC_LoadDetection;            //Atomic Table, directly used by various SW components, latest version 1.2
+  USHORT LVTMAEncoderControl;          //Atomic Table, directly used by various SW components, latest version 1.3
+  USHORT HW_Misc_Operation;            //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DAC1EncoderControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DAC2EncoderControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DVOOutputControl;             //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT CV1OutputControl;             //Atomic Table, obsolete from Ry6xx, use DAC2 Output instead
+  USHORT GetConditionalGoldenSetting;  //Only used by Bios
+  USHORT SMC_Init;                     //Function Table, directly used by various SW components, latest version 1.1
+  USHORT PatchMCSetting;               //Only used by BIOS
+  USHORT MC_SEQ_Control;               //Only used by BIOS
+  USHORT Gfx_Harvesting;               //Atomic Table, obsolete from Ry6xx, now only used by BIOS for GFX harvesting
+  USHORT EnableScaler;                 //Atomic Table, used only by Bios
+  USHORT BlankCRTC;                    //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT EnableCRTC;                   //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT GetPixelClock;                //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT EnableVGA_Render;             //Function Table, directly used by various SW components, latest version 1.1
+  USHORT GetSCLKOverMCLKRatio;         //Atomic Table, only used by Bios
+  USHORT SetCRTC_Timing;               //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT SetCRTC_OverScan;             //Atomic Table, used by various SW components, latest version 1.1
+  USHORT SetCRTC_Replication;          //Atomic Table, used only by Bios
+  USHORT SelectCRTC_Source;            //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT EnableGraphSurfaces;          //Atomic Table, used only by Bios
+  USHORT UpdateCRTC_DoubleBufferRegisters;  //Atomic Table, used only by Bios
+  USHORT LUT_AutoFill;                 //Atomic Table, only used by Bios
+  USHORT EnableHW_IconCursor;          //Atomic Table, only used by Bios
+  USHORT GetMemoryClock;               //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT GetEngineClock;               //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT SetCRTC_UsingDTDTiming;       //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT ExternalEncoderControl;       //Atomic Table, directly used by various SW components, latest version 2.1
+  USHORT LVTMAOutputControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT VRAM_BlockDetectionByStrap;   //Atomic Table, used only by Bios
+  USHORT MemoryCleanUp;                //Atomic Table, only used by Bios
+  USHORT ProcessI2cChannelTransaction; //Function Table, only used by Bios
+  USHORT WriteOneByteToHWAssistedI2C;  //Function Table, indirectly used by various SW components
+  USHORT ReadHWAssistedI2CStatus;      //Atomic Table, indirectly used by various SW components
+  USHORT SpeedFanControl;              //Function Table, indirectly used by various SW components, called from ASIC_Init
+  USHORT PowerConnectorDetection;      //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT MC_Synchronization;           //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT ComputeMemoryEnginePLL;       //Atomic Table, indirectly used by various SW components, called from SetMemory/EngineClock
+  USHORT MemoryRefreshConversion;      //Atomic Table, indirectly used by various SW components, called from SetMemory or SetEngineClock
+  USHORT VRAM_GetCurrentInfoBlock;     //Atomic Table, used only by Bios
+  USHORT DynamicMemorySettings;        //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT MemoryTraining;               //Atomic Table, used only by Bios
+  USHORT EnableSpreadSpectrumOnPPLL;   //Atomic Table, directly used by various SW components, latest version 1.2
+  USHORT TMDSAOutputControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT SetVoltage;                   //Function Table, directly and/or indirectly used by various SW components, latest version 1.1
+  USHORT DAC1OutputControl;            //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT ReadEfuseValue;               //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT ComputeMemoryClockParam;      //Function Table, only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ClockSource;                  //Atomic Table, indirectly used by various SW components, called from ASIC_Init
+  USHORT MemoryDeviceInit;             //Atomic Table, indirectly used by various SW components, called from SetMemoryClock
+  USHORT GetDispObjectInfo;            //Atomic Table, indirectly used by various SW components, called from EnableVGARender
+  USHORT DIG1EncoderControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DIG2EncoderControl;           //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DIG1TransmitterControl;       //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT DIG2TransmitterControl;       //Atomic Table, directly used by various SW components, latest version 1.1
+  USHORT ProcessAuxChannelTransaction; //Function Table, only used by Bios
+  USHORT DPEncoderService;             //Function Table, only used by Bios
+  USHORT GetVoltageInfo;               //Function Table, only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C      ProcessI2cChannelTransaction
+#define DPTranslatorControl            DIG2EncoderControl
+#define UNIPHYTransmitterControl       DIG1TransmitterControl
+#define LVTMATransmitterControl        DIG2TransmitterControl
+#define SetCRTC_DPM_State              GetConditionalGoldenSetting
+#define ASIC_StaticPwrMgtStatusChange  SetUniphyInstance
+#define HPDInterruptService            ReadHWAssistedI2CStatus
+#define EnableVGA_Access               GetSCLKOverMCLKRatio
+#define EnableYUV                      GetDispObjectInfo
+#define DynamicClockGating             EnableDispPowerGating
+#define SetupHWAssistedI2CStatus       ComputeMemoryClockParam
+#define DAC2OutputControl              ReadEfuseValue
+
+#define TMDSAEncoderControl            PatchMCSetting
+#define LVDSEncoderControl             MC_SEQ_Control
+#define LCD1OutputControl              HW_Misc_Operation
+#define TV1OutputControl               Gfx_Harvesting
+#define TVEncoderControl               SMC_Init
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER           sHeader;
+  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
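ATOM_MASTER_LIST_OF_COMMAND_TABLES is effectively an array of USHORT offsets from the start of the BIOS image, sitting right behind the 4-byte ATOM_COMMON_TABLE_HEADER. A sketch of how a driver might index it (not part of the patch; atom_command_table() is a hypothetical helper):

    #include <stdint.h>

    /* 'index' is the position of the entry in ATOM_MASTER_LIST_OF_COMMAND_TABLES,
     * e.g. SetEngineClock is the 11th USHORT in the list above (index 10). */
    static const uint8_t *atom_command_table(const uint8_t *bios,
                                             uint16_t master_table_off,
                                             unsigned int index)
    {
        /* skip the 4-byte packed ATOM_COMMON_TABLE_HEADER in front of the list */
        const uint8_t *entry = bios + master_table_off + 4 + 2 * index;
        uint16_t off = (uint16_t)(entry[0] | (entry[1] << 8));  /* little endian */

        return off ? bios + off : 0;  /* a zero offset means the table is absent */
    }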
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+  USHORT UpdatedByUtility:1;   //[15]   = Table updated by utility flag
+  USHORT PS_SizeInBytes:7;     //[14:8] = Size of parameter space in Bytes (multiple of a dword)
+  USHORT WS_SizeInBytes:8;     //[7:0]  = Size of workspace in Bytes (multiple of a dword)
+#else
+  USHORT WS_SizeInBytes:8;     //[7:0]  = Size of workspace in Bytes (multiple of a dword)
+  USHORT PS_SizeInBytes:7;     //[14:8] = Size of parameter space in Bytes (multiple of a dword)
+  USHORT UpdatedByUtility:1;   //[15]   = Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed to by _ATOM_MASTER_COMMAND_TABLE has this common header,
+// and the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER CommonHeader;
+  ATOM_TABLE_ATTRIBUTE     TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
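The attribute word is often decoded by hand rather than through the bit-fields, since bit-field layout is compiler-dependent. A sketch (not part of the patch; note that despite the "in Bytes" wording above, in-kernel ATOM interpreters appear to treat both fields as dword counts, so treat that reading as an assumption):

    #include <stdint.h>

    struct atom_table_layout { unsigned ps, ws, updated; };

    static struct atom_table_layout atom_parse_attribute(uint16_t attr)
    {
        struct atom_table_layout t;

        t.ws      = attr & 0xFF;         /* WS_SizeInBytes,   bits [7:0]  */
        t.ps      = (attr >> 8) & 0x7F;  /* PS_SizeInBytes,   bits [14:8] */
        t.updated = (attr >> 15) & 0x1;  /* UpdatedByUtility, bit 15      */
        return t;
    }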
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
+
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;    // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+  ULONG ulMemoryModuleNumber:7;   // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;   // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;    // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+  ULONG ulClock;      //On return, the re-calculated clock based on the given Fb_div, Post_Div and ref_div
+  UCHAR ucAction;     //0:reserved //1:Memory //2:Engine
+  UCHAR ucReserved;   //may expand to return larger Fbdiv later
+  UCHAR ucFbDiv;      //return value
+  UCHAR ucPostDiv;    //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+  ULONG  ulClock;     //On return, [23:0] holds the real clock
+  UCHAR  ucAction;    //0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine.
+                      //It returns the ref_div to be written to register
+  USHORT usFbDiv;     //return the Feedback value to be written to register
+  UCHAR  ucPostDiv;   //return the post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF  //Clock change tables only take bits [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK                  0x01000000  //Applicable to both memory and engine clock change; when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000  //Only applicable to memory clock change; when set, use memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000  //Only applicable to memory clock change; when set, the table will skip the predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK                 0x08000000  //Applicable to both memory and engine clock change; when set, this is the first clock change after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL                     0x10000000  //Applicable to both memory and engine clock change; when set, the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK              USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                0x01  //Applicable to both memory and engine clock change; when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH               0x02  //Only applicable to memory clock change; when set, use memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04  //Only applicable to memory clock change; when set, the table will skip the predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK               0x08  //Applicable to both memory and engine clock change; when set, this is the first clock change after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL                   0x10  //Applicable to both memory and engine clock change; when set, the table will not program SPLL/MPLL
+#define b3DRAM_SELF_REFRESH_EXIT                0x20  //Applicable to DRAM self refresh exit only; when set, the table takes the DRAM self refresh exit path
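All of the clock-change tables take a single 32-bit parameter: the target clock in 10 kHz units in bits [23:0] and the behavior flags above in bits [31:24]. A sketch (not part of the patch; the helper name is made up, and the macros are assumed to be in scope):

    #include <stdint.h>

    static uint32_t atom_clock_request(uint32_t clock_10khz, uint32_t flags)
    {
        return (clock_10khz & 0x00FFFFFFu) | flags;   /* mask = SET_CLOCK_FREQ_MASK */
    }

    /* e.g. atom_clock_request(60000, FIRST_TIME_CHANGE_CLOCK) requests 600 MHz
     * (60000 x 10 kHz) on the first clock change after ASIC boot-up. */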
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulComputeClockFlag:8;   // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+  ULONG ulClockFreq:24;         // in unit of 10kHz
+#else
+  ULONG ulClockFreq:24;         // in unit of 10kHz
+  ULONG ulComputeClockFlag:8;   // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+  USHORT usFbDivFrac;
+  USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ ulClock;   //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER  ulFbDiv;   //Output Parameter
+  };
+  UCHAR ucRefDiv;     //Output Parameter
+  UCHAR ucPostDiv;    //Output Parameter
+  UCHAR ucCntlFlag;   //Output Parameter
+  UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN    1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE      2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE   4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9      8
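The V3 parameter block is an in/out union: the caller fills in ATOM_COMPUTE_CLOCK_FREQ, and after the ComputeMemoryEnginePLL table runs, the same dword holds the feedback divider. A sketch (not part of the patch; atom_execute_table() and use_dividers() are hypothetical stand-ins for the driver's table interpreter):

    /* hypothetical prototypes, only to make the sketch self-contained */
    void atom_execute_table(void *ps);
    void use_dividers(int fb, int frac, int ref, int post);

    static void compute_engine_pll_v3(void)
    {
        COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args = {0};

        args.ulClock.ulClockFreq = 90000;   /* 900 MHz in 10 kHz units */
        args.ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;

        atom_execute_table(&args);          /* runs ComputeMemoryEnginePLL */

        /* the same dword now carries the output feedback divider */
        use_dividers(args.ulFbDiv.usFbDiv, args.ulFbDiv.usFbDivFrac,
                     args.ucRefDiv, args.ucPostDiv);
    }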
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ucPostDiv:8;   //return parameter: post divider which is used to program to register directly
+  ULONG ulClock:24;    //Input = target clock, output = actual clock
+#else
+  ULONG ulClock:24;    //Input = target clock, output = actual clock
+  ULONG ucPostDiv:8;   //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ ulClock;   //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER  ulFbDiv;   //Output Parameter
+  };
+  UCHAR ucRefDiv;    //Output Parameter
+  UCHAR ucPostDiv;   //Output Parameter
+  union
+  {
+    UCHAR ucCntlFlag;    //Output Flags
+    UCHAR ucInputFlag;   //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;        //Input Parameter
+  ULONG                   ulReserved[2];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK    0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK   0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK             0x01
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;   //Output Parameter: ucPostDiv = DFS divider
+  ATOM_S_MPLL_FB_DIVIDER ulFbDiv;   //Output Parameter: PLL FB divider
+  UCHAR ucPllRefDiv;                //Output Parameter: PLL ref divider
+  UCHAR ucPllPostDiv;               //Output Parameter: PLL post divider
+  UCHAR ucPllCntlFlag;              //Output Flags: control flag
+  UCHAR ucReserved;
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
+
+//ucPllCntlFlag
+#define SPLL_CNTL_FLAG_VCO_MODE_MASK            0x03
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
+// used by ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG ulClock;
+    ATOM_S_MPLL_FB_DIVIDER ulFbDiv;   //Output: UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR ucDllSpeed;      //Output
+  UCHAR ucPostDiv;       //Output
+  union{
+    UCHAR ucInputFlag;   //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR ucPllCntlFlag; //Output
+  };
+  UCHAR ucBWCntl;
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL is misnamed; the correct name would be BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulMemoryClock;
+  ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulTargetEngineClock;   //in 10kHz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetEngineClock;   //in 10kHz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;   //in 10kHz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetMemoryClock;   //in 10kHz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+  ULONG ulDefaultEngineClock;   //in 10kHz unit
+  ULONG ulDefaultMemoryClock;   //in 10kHz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+  ASIC_INIT_PARAMETERS sASICInitClocks;
+  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved;   //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+typedef struct _ASIC_INIT_CLOCK_PARAMETERS
+{
+  ULONG ulClkFreqIn10Khz:24;
+  ULONG ucClkFlag:8;
+}ASIC_INIT_CLOCK_PARAMETERS;
+
+typedef struct _ASIC_INIT_PARAMETERS_V1_2
+{
+  ASIC_INIT_CLOCK_PARAMETERS asSclkClock;   //in 10kHz unit
+  ASIC_INIT_CLOCK_PARAMETERS asMemClock;    //in 10kHz unit
+}ASIC_INIT_PARAMETERS_V1_2;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION_V1_2
+{
+  ASIC_INIT_PARAMETERS_V1_2 sASICInitClocks;
+  ULONG ulReserved[8];
+}ASIC_INIT_PS_ALLOCATION_V1_2;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+  UCHAR ucEnable;       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
+{
+  UCHAR ucDispPipeId;   // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
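Calling the EnableDispPowerGating table then amounts to filling in a pipe id and an enable code. A sketch (not part of the patch; atom_execute_table() is again a hypothetical stand-in for the driver's table interpreter):

    /* hypothetical prototype, only to make the sketch self-contained */
    void atom_execute_table(void *ps);

    static void power_gate_pipe(void)
    {
        ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args = {0};

        args.ucDispPipeId = ATOM_CRTC1;    /* pipe to act on */
        args.ucEnable     = ATOM_ENABLE;   /* request power gating for that pipe */

        atom_execute_table(&args);         /* runs EnableDispPowerGating */
    }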
+typedef struct _ENABLE_DISP_POWER_GATING_PS_ALLOCATION
+{
+  UCHAR ucDispPipeId;   // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;       // ATOM_ENABLE/ATOM_DISABLE/ATOM_INIT
+  UCHAR ucPadding[2];
+  ULONG ulReserved[4];
+}ENABLE_DISP_POWER_GATING_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+  UCHAR ucEnable;       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+  USHORT usDeviceID;    //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+  UCHAR  ucDacType;     //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+  UCHAR  ucMisc;        //Valid only when table revision = 1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb   0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+  DAC_LOAD_DETECTION_PARAMETERS sDacload;
+  ULONG Reserved[2];    // Don't set this one; allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;   // in 10kHz; for BIOS convenience
+  UCHAR  ucDacStandard;  // See definition of ATOM_DACx_xxx; for DEC3.0, bit 7 is used as an internal flag to indicate DAC2 (==1) or DAC1 (==0)
+  UCHAR  ucAction;       // 0: turn off encoder
+                         // 1: setup and turn on encoder
+                         // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+//                    DIG2EncoderControlTable
+//                    ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+  UCHAR  ucConfig;
+                          // [2] Link Select:
+                          // =0: PHY linkA if bfLane<3
+                          // =1: PHY linkB if bfLanes<3
+                          // =0: PHY linkA+B if bfLanes=3
+                          // [3] Transmitter Sel
+                          // =0: UNIPHY or PCIEPHY
+                          // =1: LVTMA
+  UCHAR  ucAction;        // =0: turn off encoder
+                          // =1: turn on encoder
+  UCHAR  ucEncoderMode;
+                          // =0: DP encoder
+                          // =1: LVDS encoder
+                          // =2: DVI encoder
+                          // =3: HDMI encoder
+                          // =4: SDVO encoder
+  UCHAR  ucLaneNum;       // how many lanes to enable
+  UCHAR  ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION   DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER  DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK       0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ    0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ    0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ    0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK         0x04
+#define ATOM_ENCODER_CONFIG_LINKA                 0x00
+#define ATOM_ENCODER_CONFIG_LINKB                 0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B               ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A               ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK  0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY                0x00
+#define ATOM_ENCODER_CONFIG_LVTMA                 0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1          0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2          0x08
+#define ATOM_ENCODER_CONFIG_DIGB                  0x80  // VBIOS internal use only; outside SW should set this bit to 0
+// ucAction
+// ATOM_ENABLE:  Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP                      0
+#define ATOM_ENCODER_MODE_LVDS                    1
+#define ATOM_ENCODER_MODE_DVI                     2
+#define ATOM_ENCODER_MODE_HDMI                    3
+#define ATOM_ENCODER_MODE_SDVO                    4
+#define ATOM_ENCODER_MODE_DP_AUDIO                5
+#define ATOM_ENCODER_MODE_TV                      13
+#define ATOM_ENCODER_MODE_CV                      14
+#define ATOM_ENCODER_MODE_CRT                     15
+#define ATOM_ENCODER_MODE_DVO                     16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP   // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                      // For DP1.2
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReserved1:2;
+  UCHAR ucTransmitterSel:2;   // =0: UniphyAB, =1: UniphyCD, =2: UniphyEF
+  UCHAR ucLinkSel:1;          // =0: linkA/C/E, =1: linkB/D/F
+  UCHAR ucReserved:1;
+  UCHAR ucDPLinkRate:1;       // =0: 1.62GHz, =1: 2.7GHz
+#else
+  UCHAR ucDPLinkRate:1;       // =0: 1.62GHz, =1: 2.7GHz
+  UCHAR ucReserved:1;
+  UCHAR ucLinkSel:1;          // =0: linkA/C/E, =1: linkB/D/F
+  UCHAR ucTransmitterSel:2;   // =0: UniphyAB, =1: UniphyCD, =2: UniphyEF
+  UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;       // in 10kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+  UCHAR ucAction;
+  UCHAR ucEncoderMode;
+                             // =0: DP encoder
+                             // =1: LVDS encoder
+                             // =2: DVI encoder
+                             // =3: HDMI encoder
+                             // =4: SDVO encoder
+  UCHAR ucLaneNum;           // how many lanes to enable
+  UCHAR ucStatus;            // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK         0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ      0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ      0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK           0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA                   0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB                   0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK    0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1            0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2            0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3            0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START        0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1     0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2     0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3     0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE     0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                  0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON                   0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
+#define ATOM_ENCODER_CMD_SETUP                         0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE              0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE     0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE   0x00
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// The ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used; the disable sub-function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReserved1:1;
+  UCHAR ucDigSel:3;       // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in register spec also referred to as DIGA/B/C/D/E/F)
+  UCHAR ucReserved:3;
+  UCHAR ucDPLinkRate:1;   // =0: 1.62GHz, =1: 2.7GHz
+#else
+  UCHAR ucDPLinkRate:1;   // =0: 1.62GHz, =1: 2.7GHz
+  UCHAR ucReserved:3;
+  UCHAR ucDigSel:3;       // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in register spec also referred to as DIGA/B/C/D/E/F)
+  UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK     0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ  0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL         0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER        0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER        0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER        0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER        0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER        0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER        0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;       // in 10kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+  UCHAR ucAction;
+  union{
+    UCHAR ucEncoderMode;
+                             // =0: DP encoder
+                             // =1: LVDS encoder
+                             // =2: DVI encoder
+                             // =3: HDMI encoder
+                             // =4: SDVO encoder
+                             // =5: DP audio
+    UCHAR ucPanelMode;       // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+                             // =0:    external DP
+                             // =0x1:  internal DP2
+                             // =0x11: internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;           // how many lanes to enable
+  UCHAR ucBitPerColor;       // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// The ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used; the disable sub-function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReserved1:1;
+  UCHAR ucDigSel:3;       // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in register spec also referred to as DIGA/B/C/D/E/F)
+  UCHAR ucReserved:2;
+  UCHAR ucDPLinkRate:2;   // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz  <= changed compared to previous version
+#else
+  UCHAR ucDPLinkRate:2;   // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz  <= changed compared to previous version
+  UCHAR ucReserved:2;
+  UCHAR ucDigSel:3;       // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in register spec also referred to as DIGA/B/C/D/E/F)
+  UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK     0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ  0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ  0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL         0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER        0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER        0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER        0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER        0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER        0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER        0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER        0x60
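Drivers frequently build the one-byte config by OR-ing the constants rather than writing through the bit-fields. A sketch (not part of the patch; the helper is illustrative):

    /* compose the V4 encoder config byte without relying on C bit-field layout */
    static unsigned char dig_encoder_config_v4(unsigned dig, unsigned dp_rate)
    {
        return (unsigned char)(((dig & 0x7) << 4)   /* ucDigSel,     bits [6:4] */
                             | (dp_rate & 0x3));    /* ucDPLinkRate, bits [1:0] */
    }

    /* e.g. dig_encoder_config_v4(2, 2) == 0x22 ==
     * ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER | ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ */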
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;       // in 10kHz; for BIOS convenience
+  union{
+    ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR ucAction;
+  union{
+    UCHAR ucEncoderMode;
+                             // =0: DP encoder
+                             // =1: LVDS encoder
+                             // =2: DVI encoder
+                             // =3: HDMI encoder
+                             // =4: SDVO encoder
+                             // =5: DP audio
+    UCHAR ucPanelMode;       // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+                             // =0:    external DP
+                             // =0x1:  internal DP2
+                             // =0x11: internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;           // how many lanes to enable
+  UCHAR ucBitPerColor;       // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;            // HPD ID (1-6). =0 means skip HPD programming. New compared to previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE                0x00
+#define PANEL_6BIT_PER_COLOR              0x01
+#define PANEL_8BIT_PER_COLOR              0x02
+#define PANEL_10BIT_PER_COLOR             0x03
+#define PANEL_12BIT_PER_COLOR             0x04
+#define PANEL_16BIT_PER_COLOR             0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE    0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE   0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE   0x11
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+//                    LVTMATransmitterControlTable
+//                    DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+  UCHAR ucLaneSel;
+  UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+  union
+  {
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+  };
+  UCHAR ucConfig;
+                            // [0]=0: 4 lane Link,
+                            //    =1: 8 lane Link ( Dual Links TMDS )
+                            // [1]=0: InCoherent mode
+                            //    =1: Coherent Mode
+                            // [2] Link Select:
+                            // =0: PHY linkA if bfLane<3
+                            // =1: PHY linkB if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [5:4] PCIE lane Sel
+                            // =0: lane 0~3 or 0~7
+                            // =1: lane 4~7
+                            // =2: lane 8~11 or 8~15
+                            // =3: lane 12~15
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION  DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK     0x00ff
+
+//ucConfig
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK         0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT           0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK      0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA              0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB              0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B            0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A            0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK   0x08   // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER       0x00   // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER       0x08   // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK        0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL        0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE        0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN      0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK      0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3           0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7           0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7           0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11          0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15          0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15         0xc0
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE            0
+#define ATOM_TRANSMITTER_ACTION_ENABLE             1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF          2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON           3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START    5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP     6
+#define ATOM_TRANSMITTER_ACTION_INIT                  7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT        8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT         9
+#define ATOM_TRANSMITTER_ACTION_SETUP                 10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH          11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON              12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF             13
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucReserved:1;
+  UCHAR fDPConnector:1;       //bit4=0: DP connector, =1: non-DP connector
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA ( DIG inst0 ), =1: Data/Clk path source from DIGB ( DIG inst1 )
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA ( DIG inst0 ), =1: Data/Clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;       //bit4=0: DP connector, =1: non-DP connector
+  UCHAR ucReserved:1;
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR   0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT              0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK         0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA                 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB                 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK      0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER          0x00   // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER          0x08   // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR           0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK  0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1          0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2          0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3          0x80   //EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+  union
+  {
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+  UCHAR ucAction;           // define as ATOM_TRANSMITER_ACTION_XXX
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;     //bit5:4: PPLL1 =0, PPLL2 =1, EXT_CLK =2
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA/C/E, =1: Data/Clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA/C/E, =1: Data/Clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;     //bit5:4: PPLL1 =0, PPLL2 =1, EXT_CLK =2
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+  union
+  {
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+  UCHAR ucAction;           // define as ATOM_TRANSMITER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR   0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT              0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK         0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA                 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB                 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK      0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER          0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER          0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK        0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL                  0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL                  0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT         0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK  0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1          0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2          0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3          0x80   //EF
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+  union
+  {
+    UCHAR ucLaneSet;
+    struct {
+#if ATOM_BIG_ENDIAN
+      UCHAR ucPOST_CURSOR2:2;   //Bit[7:6] Post Cursor2 Level  <= New in V4
+      UCHAR ucPRE_EMPHASIS:3;   //Bit[5:3] Pre-emphasis Level
+      UCHAR ucVOLTAGE_SWING:3;  //Bit[2:0] Voltage Swing Level
+#else
+      UCHAR ucVOLTAGE_SWING:3;  //Bit[2:0] Voltage Swing Level
+      UCHAR ucPRE_EMPHASIS:3;   //Bit[5:3] Pre-emphasis Level
+      UCHAR ucPOST_CURSOR2:2;   //Bit[7:6] Post Cursor2 Level  <= New in V4
+#endif
+    };
+  };
+}ATOM_DP_VS_MODE_V4;
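ucLaneSet packs the DP training levels exactly as the bit-fields describe: swing in [2:0], pre-emphasis in [5:3], post-cursor2 in [7:6]. A sketch (not part of the patch), which also lines up with the DP_LANE_SET__* encoding defined further below:

    static unsigned char dp_lane_set(unsigned swing, unsigned pre_emph,
                                     unsigned post_cursor2)
    {
        return (unsigned char)((swing & 0x7)                /* ucVOLTAGE_SWING, [2:0] */
                             | ((pre_emph & 0x7) << 3)      /* ucPRE_EMPHASIS,  [5:3] */
                             | ((post_cursor2 & 0x3) << 6));/* ucPOST_CURSOR2,  [7:6] */
    }

    /* consistency check: dp_lane_set(1, 1, 0) == 0x09 == DP_LANE_SET__3_5DB_0_6V */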
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;     //bit5:4: PPLL1 =0, PPLL2 =1, DCPLL =2, EXT_CLK =3  <= New
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA/C/E, =1: Data/Clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;      //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;          //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is A or C or E
+                              //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0; when fDualLinkConnector=1, the master link of the dual link is B or D or F
+  UCHAR ucEncoderSel:1;       //bit3=0: Data/Clk path source from DIGA/C/E, =1: Data/Clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;     //bit5:4: PPLL1 =0, PPLL2 =1, DCPLL =2, EXT_CLK =3  <= New
+  UCHAR ucTransmitterSel:2;   //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                              //        =1 Dig Transmitter 2 ( Uniphy CD )
+                              //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;       // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;         // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode, redefined compared to previous version
+  };
+  union
+  {
+    ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR ucAction;              // define as ATOM_TRANSMITER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR   0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT              0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK         0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA                 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB                 0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK      0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER          0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER          0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK       0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL                 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL                 0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL                 0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT        0x30   // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK  0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1          0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2          0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3          0x80   //EF
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;
+  UCHAR ucCoherentMode:1;
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;
+  UCHAR ucPhyClkSrcId:2;
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;    // Encoder Clock in 10kHz: (DP mode) = link clock/10, (TMDS/LVDS/HDMI) = pixel clock, (HDMI deep color) = pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;       // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF
+  UCHAR  ucAction;      // define as ATOM_TRANSMITER_ACTION_xxx
+  UCHAR  ucLaneNum;     // indicate lane number 1-8
+  UCHAR  ucConnObjId;   // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;     // indicate DIG mode
+  union{
+    ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR ucDigEncoderSel;   // indicate DIG front end encoder
+  UCHAR ucDPLaneSet;
+  UCHAR ucReserved;
+  UCHAR ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                         0
+#define ATOM_PHY_ID_UNIPHYB                         1
+#define ATOM_PHY_ID_UNIPHYC                         2
+#define ATOM_PHY_ID_UNIPHYD                         3
+#define ATOM_PHY_ID_UNIPHYE                         4
+#define ATOM_PHY_ID_UNIPHYF                         5
+#define ATOM_PHY_ID_UNIPHYG                         6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL               0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL               0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL               0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL               0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL               0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL               0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL               0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP              0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS            1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI             2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI            3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO            4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST          5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                       0x00
+#define DP_LANE_SET__0DB_0_6V                       0x01
+#define DP_LANE_SET__0DB_0_8V                       0x02
+#define DP_LANE_SET__0DB_1_2V                       0x03
+#define DP_LANE_SET__3_5DB_0_4V                     0x08
+#define DP_LANE_SET__3_5DB_0_6V                     0x09
+#define DP_LANE_SET__3_5DB_0_8V                     0x0a
+#define DP_LANE_SET__6DB_0_4V                       0x10
+#define DP_LANE_SET__6DB_0_6V                       0x11
+#define DP_LANE_SET__9_5DB_0_4V                     0x18
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT         0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK  0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL            0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL            0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL            0x08
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT   0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK     0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT    0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL       0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL         0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL         0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL         0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL         0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL         0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL         0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5  DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
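usSymClock in the V1.5 parameters is in 10 kHz units and, per the comment above, equals the link clock/10 for DP or a deep-color-scaled TMDS clock for HDMI. A sketch of the HDMI case (not part of the patch; assumes bpc is one of 8/10/12/16):

    /* deep color scales the TMDS clock by bpc/8, e.g. 30bpp (10 bpc) -> x1.25 */
    static unsigned short hdmi_sym_clock_10khz(unsigned long pixel_clock_10khz,
                                               unsigned bpc)
    {
        return (unsigned short)(pixel_clock_10khz * bpc / 8);
    }

    /* e.g. a 148.5 MHz mode at 10 bpc: hdmi_sym_clock_10khz(14850, 10) == 18562,
     * i.e. a 185.625 MHz TMDS clock, truncated to 10 kHz units. */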
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+    USHORT usPixelClock;   // pixel clock in 10kHz, valid when ucAction = SETUP/ENABLE_OUTPUT
+    USHORT usConnectorId;  // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;         // indicates which encoder, and the DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucAction;
+  UCHAR  ucEncoderMode;    // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;        // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucBitPerColor;    // output bits per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncoderMode = DP
+  UCHAR  ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERANL_ENCODER_ACTION_V3_DISABLE_OUTPUT        0x00
+#define EXTERANL_ENCODER_ACTION_V3_ENABLE_OUTPUT         0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT          0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP         0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF  0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING      0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION     0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP             0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK       0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ    0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ    0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ    0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MAKS      0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1              0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2              0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3              0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+/****************************************************************************/
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DEC30)
+//                    TMDSAOutputControlTable  (Before DEC30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR ucAction;       // Possible input: ATOM_ENABLE || ATOM_DISABLE
+                        // When the display is LCD, in addition to the above:
+                        // ATOM_LCD_BLOFF || ATOM_LCD_BLON || ATOM_LCD_BL_BRIGHTNESS_CONTROL ||
+                        // ATOM_LCD_SELFTEST_START || ATOM_LCD_SELFTEST_STOP
+  UCHAR aucPadding[3];  // padding to DWORD alignment
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3   DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+typedef struct _LVTMA_OUTPUT_CONTROL_PARAMETERS_V2
+{
+  // Possible values of ucAction:
+  // ATOM_TRANSMITTER_ACTION_LCD_BLON
+  // ATOM_TRANSMITTER_ACTION_LCD_BLOFF
+  // ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL
+  // ATOM_TRANSMITTER_ACTION_POWER_ON
+  // ATOM_TRANSMITTER_ACTION_POWER_OFF
+  UCHAR ucAction;
+  UCHAR ucBriLevel;
+  USHORT usPwmFreq;     // in unit of Hz, 200 means 200Hz
+}LVTMA_OUTPUT_CONTROL_PARAMETERS_V2;
+
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+  UCHAR  ucCRTC;        // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucBlanking;    // ATOM_BLANKING or ATOM_BLANKING_OFF
+  USHORT usBlackColorRCr;
+  USHORT usBlackColorGY;
+  USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION  BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+//                    EnableCRTCMemReqTable
+//                    UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+  UCHAR ucCRTC;         // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEnable;       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION  ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+  USHORT usOverscanRight;    // right
+  USHORT usOverscanLeft;     // left
+  USHORT usOverscanBottom;   // bottom
+  USHORT usOverscanTop;      // top
+  UCHAR  ucCRTC;             // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+  UCHAR ucH_Replication;     // horizontal replication
+  UCHAR ucV_Replication;     // vertical replication
+  UCHAR usCRTC;              // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+ UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID 0x00
+//#define ASIC_INT_TV_ENCODER_ID 0x02
+//#define ASIC_INT_DIG1_ENCODER_ID 0x03
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04
+//#define ASIC_EXT_TV_ENCODER_ID 0x06
+//#define ASIC_INT_DVO_ENCODER_ID 0x07
+//#define ASIC_INT_DIG2_ENCODER_ID 0x09
+//#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP 0
+//#define ATOM_ENCODER_MODE_LVDS 1
+//#define ATOM_ENCODER_MODE_DVI 2
+//#define ATOM_ENCODER_MODE_HDMI 3
+//#define ATOM_ENCODER_MODE_SDVO 4
+//#define ATOM_ENCODER_MODE_TV 13
+//#define ATOM_ENCODER_MODE_CV 14
+//#define ATOM_ENCODER_MODE_CRT 15
+
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V3
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucDstBpc; // PANEL_6/8/10/12BIT_PER_COLOR
+}SELECT_CRTC_SOURCE_PARAMETERS_V3;
+
+
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+// GetPixelClockTable
+/****************************************************************************/
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+ USHORT usPixelClock; // in 10kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this PPLL
+ UCHAR ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1, Minor revision=2, adds ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK 0xF0
+#define MISC_DEVICE_INDEX_SHIFT 4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this PPLL
+ UCHAR ucMiscInfo; // Different bits for different purposes: bits [7:4] device index, bit[0] = force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
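For the v1.1/v1.2 parameter blocks above, usPixelClock is expressed in 10 kHz units and relates to the divider fields by the formula in the comment, PixelClock = (RefClk * FB_Div) / (Ref_Div * Post_Div). A minimal worked example, with an assumed 27 MHz reference clock and arbitrary divider values (none of these numbers come from this header):

/* Illustrative sketch only -- not part of atombios.h. */
#include <stdio.h>

int main(void)
{
    unsigned long ref_clk = 2700;  /* 27 MHz in 10 kHz units (assumed) */
    unsigned long fb_div = 200, ref_div = 2, post_div = 10;
    unsigned long pclk = (ref_clk * fb_div) / (ref_div * post_div);

    /* prints 27000, i.e. 27000 * 10 kHz = a 270 MHz pixel clock */
    printf("usPixelClock = %lu (10 kHz units) = %lu kHz\n", pclk, pclk * 10);
    return 0;
}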
+
+//Major revision=1, Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV 13
+//ATOM_ENCODER_MODE_CV 14
+//ATOM_ENCODER_MODE_CRT 15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+//#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10kHz units; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL. For the VGA PPLL, make sure this value is not 0.
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
+ union
+ {
+ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+ UCHAR ucDVOConfig; // when using DVO, need to know SDR/DDR, 12-bit or 24-bit
+ };
+ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]=set pclk for VGA, bit[2]=CRTC sel
+ // bit[3]=0: use PPLL as the dispclk source, =1: use engine clock as the dispclk source
+ // bit[4]=0: use XTALIN as the source of the reference divider, =1: use the pre-defined clock as the source of the reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+ UCHAR ucCRTC; // ATOM_CRTC1~6, indicates the CRTC controller to
+ // drive the pixel clock. Not used for the DCPLL case.
+ union{
+ UCHAR ucReserved;
+ UCHAR ucFracFbDiv; // [gphan] temporary, to prevent a build problem. Remove it after the driver code is changed.
+ };
+ USHORT usPixelClock; // target pixel clock to drive the CRTC timing
+ // 0 means disable PPLL/DCPLL.
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicates which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp, =1: 30bpp, =2: 32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depending on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20-bit feedback divider decimal fraction part, range from 1~999999 (0.000001 to 0.999999)
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
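PIXEL_CLOCK_PARAMETERS_V5 splits the feedback divider into the usFbDiv integer part and the 20-bit ulFbDivDecFrac decimal fraction (1~999999, i.e. 0.000001 to 0.999999). Below is a sketch of one way to derive both parts from a target ratio; the clock values are assumptions for the example, not taken from this header:

/* Illustrative sketch only -- not part of atombios.h. */
#include <stdio.h>

int main(void)
{
    /* assumed example: feedback ratio = (target output * post_div) / (ref clock / ref_div) */
    unsigned long long num = 250000000; /* target VCO input, in Hz (assumed) */
    unsigned long long den = 3000000;   /* divided reference clock, in Hz (assumed) */
    unsigned long fb_div = (unsigned long)(num / den);
    /* scale the remainder to a decimal fraction in millionths */
    unsigned long frac = (unsigned long)((num % den) * 1000000ULL / den);

    /* prints usFbDiv = 83, ulFbDivDecFrac = 333333 (i.e. 83.333333) */
    printf("usFbDiv = %lu, ulFbDivDecFrac = %lu\n", fb_div, frac);
    return 0;
}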
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicates the CRTC controller to
+ // drive the pixel clock. Not used for the DCPLL case.
+ ULONG ulPixelClock:24; // target pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+ ULONG ulPixelClock:24; // target pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicates the CRTC controller to
+ // drive the pixel clock. Not used for the DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+ union{
+ CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock frequency and CRTC id
+ ULONG ulDispEngClkFreq; // dispclk frequency
+ };
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicates which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp, =1: 30bpp, =2: 32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depending on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20-bit feedback divider decimal fraction part, range from 1~999999 (0.000001 to 0.999999)
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct definition for 30bpp should be 1 for 30bpp(5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
+#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
+#define PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS 0x40
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+ UCHAR ucStatus;
+ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from the PCIE ref clock
+ UCHAR ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+ USHORT usPixelClock;
+ UCHAR ucTransmitterID;
+ UCHAR ucEncodeMode;
+ union
+ {
+ UCHAR ucDVOConfig; //if DVO, need to pass the link rate and whether the output is 12-bit low or 24-bit
+ UCHAR ucConfig; //if not DVO, not defined yet
+ };
+ UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+ USHORT usPixelClock; // target pixel clock
+ UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
+ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+ UCHAR ucDispPllConfig; // display pll configure parameter, defined as the DISPPLL_CONFIG_XXXX values below
+ UCHAR ucExtTransmitterID; // external encoder id.
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// usDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+ ULONG ulDispPllFreq; // returns the display PPLL freq used to generate the pixclock and the related idclk, symclk, etc.
+ UCHAR ucRefDiv; // if non-zero, it is used to calculate the other PPLL parameters fb_divider and post_div (if they are not given)
+ UCHAR ucPostDiv; // if non-zero, it is used to calculate the other PPLL parameter fb_divider
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+ union
+ {
+ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
+ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+ };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
+
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE: enable YUV, or ATOM_DISABLE: disable YUV (RGB)
+ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
+ UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulReturnMemoryClock; // current memory speed in 10KHz units
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulReturnEngineClock; // current engine speed in 10KHz units
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
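GetEngineClockTable and GetMemoryClockTable are query-style command tables: the caller passes a parameter block and reads the result back out of it. Below is a sketch of the calling pattern, modeled on the radeon driver's ATOM interpreter; it assumes atom_execute_table() and the GetIndexIntoMasterTable() macro from drivers/gpu/drm/radeon are in scope, and the function name is invented for the example:

/* Illustrative sketch only -- not part of atombios.h. */
static uint32_t query_engine_clock_10khz(struct atom_context *ctx)
{
    GET_ENGINE_CLOCK_PS_ALLOCATION args;
    int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);

    /* run the GetEngineClock command table; the interpreter fills args */
    atom_execute_table(ctx, index, (uint32_t *)&args);
    return args.ulReturnEngineClock; /* current engine speed, 10 kHz units */
}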
+
+/****************************************************************************/
+// The following structures and constants may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//A read operation is successful when the parameter space is non-zero; otherwise the read operation failed.
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between engine clock and I2C clock
+ USHORT usVRAMAddress; //Address in the frame buffer where to place the raw EDID
+ USHORT usStatus; //When used as output: lower byte is the EDID checksum, high byte is the hardware status
+ //When used as input: lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+ UCHAR ucSlaveAddr; //Read from which slave
+ UCHAR ucLineNumber; //Read from which HW-assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+
+
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
+#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between engine clock and I2C clock
+ USHORT usByteOffset; //Write to which byte
+ //The upper portion of usByteOffset is the format of the data:
+ //1bytePS+offsetPS
+ //2bytesPS+offsetPS
+ //blockID+offsetPS
+ //blockID+offsetID
+ //blockID+counterID+offsetID
+ UCHAR ucData; //PS data1
+ UCHAR ucStatus; //Status byte: 1=success, 2=failure. Also used as PS data2
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW-assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between engine clock and I2C clock
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW-assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+ UCHAR ucPowerConnectorStatus; //Used for the return value. 0: detected, 1: not detected
+ UCHAR ucPwrBehaviorId;
+ USHORT usPwrBudget; //how much power is currently booted to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+ UCHAR ucPowerConnectorStatus; //Used for the return value. 0: detected, 1: not detected
+ UCHAR ucReserved;
+ USHORT usPwrBudget; //how much power is currently booted to, in units of watts
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
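Before moving on to the spread-spectrum tables, here is a sketch of how the READ_EDID_FROM_HW_I2C_DATA_PARAMETERS block defined above might be filled in: on input, the low byte of usStatus carries the byte count, and 0xA0 is the standard DDC EDID slave address. The helper name and the prescale value are assumptions for the example, not defined by this header:

/* Illustrative sketch only -- not part of atombios.h. */
static void fill_edid_read_args(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *args,
                                unsigned short vram_offset)
{
    args->usPrescale = 0x7f;           /* assumed engine-clock/I2C-clock ratio */
    args->usVRAMAddress = vram_offset; /* frame-buffer offset for the raw EDID */
    args->usStatus = 128;              /* input: number of bytes to read */
    args->ucSlaveAddr = 0xA0;          /* standard DDC EDID slave address */
    args->ucLineNumber = 0;            /* which HW-assisted I2C line to use */
}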
+
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStepSize_Delay; //bits 3:2 SS_STEP_SIZE; bits 6:4 SS_DELAY
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread, 1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
+
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+ USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC, new in DCE5.0
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread, 1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; //Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz units
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+typedef struct _MEMORY_TRAINING_PARAMETERS_V1_2
+{
+ USHORT usMemTrainingMode;
+ USHORT usReserved;
+}MEMORY_TRAINING_PARAMETERS_V1_2;
+
+//usMemTrainingMode
+#define NORMAL_MEMORY_TRAINING_MODE 0
+#define ENTER_DRAM_SELFREFRESH_MODE 1
+#define EXIT_DRAM_SELFRESH_MODE 2
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable (Before DEC30)
+// LVTMAEncoderControlTable (Before DEC30)
+// TMDSAEncoderControlTable (Before DEC30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // bit0=0: Enable single link
+ // =1: Enable dual link
+ // Bit1=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
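In LVDS_ENCODER_CONTROL_PARAMETERS above, ucMisc packs single/dual link into bit0 and 666RGB/888RGB into bit1. A small sketch of packing that byte follows; the 165 MHz dual-link threshold is an assumption for the example, not something mandated by this header:

/* Illustrative sketch only -- not part of atombios.h. */
static unsigned char lvds_misc(unsigned int pclk_10khz, int is_888rgb)
{
    unsigned char misc = 0;

    if (pclk_10khz > 16500) /* assumed single-link limit of 165 MHz */
        misc |= 0x01;       /* bit0 = 1: enable dual link */
    if (is_888rgb)
        misc |= 0x02;       /* bit1 = 1: 888RGB panel */
    return misc;
}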
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ UCHAR ucTruncate; // bit0=0: Disable truncate
+ // =1: Enable truncate
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucSpatial; // bit0=0: Disable spatial dithering
+ // =1: Enable spatial dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucTemporal; // bit0=0: Disable temporal dithering
+ // =1: Enable temporal dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ // bit5=0: Gray level 2
+ // =1: Gray level 4
+ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
+ // =1: 25FRC_SEL pattern F
+ // bit6:5=0: 50FRC_SEL pattern A
+ // =1: 50FRC_SEL pattern B
+ // =2: 50FRC_SEL pattern C
+ // =3: 50FRC_SEL pattern D
+ // bit7=0: 75FRC_SEL pattern E
+ // =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+ UCHAR ucEnable; // Enable or disable the external TMDS encoder
+ UCHAR ucMisc; // Bit0=0: Enable single link; =1: Enable dual link. Bit1 {=0: 666RGB, =1: 888RGB}
+ UCHAR ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucBitPerColor; //please refer to the definition of PANEL_xBIT_PER_COLOR
+ UCHAR ucReseved[3];
+}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: the structure is not changed, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+// =1: coherent mode
+
+//==========================================================================================
+//Next time the encoder parameter definitions change, change only the defines here!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL 0x01
+#define PANEL_ENCODER_MISC_COHERENT 0x02
+#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
+#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN 0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20
+#define PANEL_ENCODER_25FRC_MASK 0x10
+#define PANEL_ENCODER_25FRC_E 0x00
+#define PANEL_ENCODER_25FRC_F 0x10
+#define PANEL_ENCODER_50FRC_MASK 0x60
+#define PANEL_ENCODER_50FRC_A 0x00
+#define PANEL_ENCODER_50FRC_B 0x20
+#define PANEL_ENCODER_50FRC_C 0x40
+#define PANEL_ENCODER_50FRC_D 0x60
+#define PANEL_ENCODER_75FRC_MASK 0x80
+#define PANEL_ENCODER_75FRC_E 0x00
+#define PANEL_ENCODER_75FRC_F 0x80
+
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
+#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
+#define SET_VOLTAGE_INIT_MODE 5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the max. voltage for the soldered ASIC
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
+
+#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
+
+typedef struct _SET_VOLTAGE_PARAMETERS
+{
+ UCHAR ucVoltageType; // Tells which voltage to set up: VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // To set all, to set source A or source B, or ...
+ UCHAR ucVoltageIndex; // An index telling which voltage level
+ UCHAR ucReserved;
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V2
+{
+ UCHAR ucVoltageType; // Tells which voltage to set up: VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // Not used; might serve as a state machine for different power modes
+ USHORT usVoltageLevel; // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+// used by both SetVoltageTable v1.3 and v1.4
+typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
+{
+ UCHAR ucVoltageType; // Tells which voltage to set up: VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Indicates the action: set voltage level
+ USHORT usVoltageLevel; // real voltage level in units of mV, or Voltage Phase (0, 1, 2, ...)
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC 1
+#define VOLTAGE_TYPE_MVDDC 2
+#define VOLTAGE_TYPE_MVDDQ 3
+#define VOLTAGE_TYPE_VDDCI 4
+#define VOLTAGE_TYPE_VDDGFX 5
+#define VOLTAGE_TYPE_PCC 6
+
+#define VOLTAGE_TYPE_GENERIC_I2C_1 0x11
+#define VOLTAGE_TYPE_GENERIC_I2C_2 0x12
+#define VOLTAGE_TYPE_GENERIC_I2C_3 0x13
+#define VOLTAGE_TYPE_GENERIC_I2C_4 0x14
+#define VOLTAGE_TYPE_GENERIC_I2C_5 0x15
+#define VOLTAGE_TYPE_GENERIC_I2C_6 0x16
+#define VOLTAGE_TYPE_GENERIC_I2C_7 0x17
+#define VOLTAGE_TYPE_GENERIC_I2C_8 0x18
+#define VOLTAGE_TYPE_GENERIC_I2C_9 0x19
+#define VOLTAGE_TYPE_GENERIC_I2C_10 0x1A
+
+//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
+#define ATOM_SET_VOLTAGE 0 //Set voltage level
+#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init regulator
+#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator phase, only for SVID/PVID regulators
+#define ATOM_GET_MAX_VOLTAGE 6 //Get max voltage; not used from SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL 6 //Get voltage level from virtual voltage ID; not used for SetVoltage v1.4
+#define ATOM_GET_LEAKAGE_ID 8 //Get leakage voltage Id (starting from SMU7x IP), SetVoltage v1.4
+
+// define virtual voltage ids in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
+#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05
+#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06
+#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07
+#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
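Putting the SetVoltageTable v1.3 pieces together, here is a sketch of filling SET_VOLTAGE_PARAMETERS_V1_3 to request a VDDC level in millivolts; the helper name and the 1100 mV value are arbitrary examples, not taken from this header:

/* Illustrative sketch only -- not part of atombios.h. */
static void set_vddc_1100mv(SET_VOLTAGE_PARAMETERS_V1_3 *args)
{
    args->ucVoltageType = VOLTAGE_TYPE_VDDC; /* which rail to program */
    args->ucVoltageMode = ATOM_SET_VOLTAGE;  /* action: set voltage level */
    args->usVoltageLevel = 1100;             /* real voltage in mV (example) */
}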
+
+// Newly added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+ UCHAR ucVoltageType; // Input: tells which voltage to query: VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: indicates the action: get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in units of mV, or Voltage Phase (0, 1, 2, ...), or Leakage Id
+ ULONG ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+ ULONG ulVotlageGpioState;
+ ULONG ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+ USHORT usVoltageLevel;
+ USHORT usVoltageId; // Voltage Id programmed in the voltage regulator
+ ULONG ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define ATOM_GET_VOLTAGE_VID 0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
+#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 regulator info
+
+// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
+#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+
+#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+
+
+// Newly added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+ UCHAR ucVoltageType; // Input: tells which voltage to query: VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: indicates the action: get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in units of mV, or Voltage Phase (0, 1, 2, ...), or Leakage Id
+ ULONG ulSCLKFreq; // Input: when ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE, the DPM state SCLK frequency, defined in the PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
+
+// Newly added from CI Hawaii for the EVV feature
+typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+ USHORT usVoltageLevel; // real voltage level in units of mV
+ USHORT usVoltageId; // Voltage Id programmed in the voltage regulator
+ USHORT usTDP_Current; // TDP_Current in units of 0.01 A
+ USHORT usTDP_Power; // TDP_Power in units of 0.1 W
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
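The EVV flow combines the v1.2 input block with GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2: the caller supplies a DPM SCLK frequency and reads back the voltage level, regulator VID and TDP figures. A sketch of preparing the query; the function name is invented for the example:

/* Illustrative sketch only -- not part of atombios.h. */
static void fill_evv_query(GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 *args,
                           unsigned long sclk_10khz)
{
    args->ucVoltageType = VOLTAGE_TYPE_VDDC;            /* rail of interest */
    args->ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; /* EVV query mode */
    args->usVoltageLevel = 0;                           /* unused on input here */
    args->ulSCLKFreq = sclk_10khz; /* DPM SCLK from the PPTable dependence table */
}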
+
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+ USHORT UtilityPipeLine; // Offset for the utility to get parser info. Don't change this position!
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1; not configurable from BIOS, need to include the table to build the BIOS
+ USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1; not configurable from BIOS, need to include the table to build the BIOS
+ USHORT StandardVESA_Timing; // Only used by BIOS
+ USHORT FirmwareInfo; // Shared by various SW components, latest version 1.4
+ USHORT PaletteData; // Only used by BIOS
+ USHORT LCD_Info; // Shared by various SW components, latest version 1.3; was called LVDS_Info
+ USHORT DIGTransmitterInfo; // Internal, used by VBIOS only, version 3.1
+ USHORT AnalogTV_Info; // Shared by various SW components, latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components, latest version 1.2 will be used from R600
+ USHORT VRAM_UsageByFirmware; // Shared by various SW components, latest version 1.3 will be used from R600
+ USHORT GPIO_Pin_LUT; // Shared by various SW components, latest version 1.1
+ USHORT VESA_ToInternalModeLUT; // Only used by BIOS
+ USHORT ComponentVideoInfo; // Shared by various SW components, latest version 2.1 will be used from R600
+ USHORT PowerPlayInfo; // Shared by various SW components, latest version 2.1; new design from R600
+ USHORT GPUVirtualizationInfo; // Will be obsolete from R600
+ USHORT SaveRestoreInfo; // Only used by BIOS
+ USHORT PPLL_SS_Info; // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
+ USHORT XTMDS_Info; // Will be obsolete from R600
+ USHORT MclkSS_Info; // Shared by various SW components, latest version 1.1; only enabled when an external SS chip is used
+ USHORT Object_Header; // Shared by various SW components, latest version 1.1
+ USHORT IndirectIOAccess; // Only used by BIOS; this table position can't change at all!!
+ USHORT MC_InitParameter; // Only used by command table
+ USHORT ASIC_VDDC_Info; // Will be obsolete from R600
+ USHORT ASIC_InternalSS_Info; // New table name from R600; used to be called "ASIC_MVDDC_Info"
+ USHORT TV_VideoMode; // Only used by command table
+ USHORT VRAM_Info; // Only used by command table, latest version 1.3
+ USHORT MemoryTrainingInfo; // Used by VBIOS and the Diag utility for memory training since R600; the new table revision starts from 2.1
+ USHORT IntegratedSystemInfo; // Shared by various SW components
+ USHORT ASIC_ProfilingInfo; // New table name from R600; used to be called "ASIC_VDDCI_Info" for pre-R600
+ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
+ USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
+ USHORT ServiceInfo;
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
+// For backward compatibility
+#define LVDS_Info LCD_Info
+#define DAC_Info PaletteData
+#define TMDS_Info DIGTransmitterInfo
+#define CompassionateData GPUVirtualizationInfo
+
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // HW info table signature string "$ATI"
+ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, dedicated I2C pin, etc.)
+ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0), video out crystal frequency (6:4) and TV data port (7)
+ UCHAR ucVideoPortInfo; // Provides the video port capabilities
+ UCHAR ucHostPortInfo; // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // MM info table signature string "$MMT"
+ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+ UCHAR ucAudioChipInfo; // Lists the audio chip type (3:0), product type (4) and OEM revision (7:5)
+ UCHAR ucProductID; // Defined as OEM ID or ATI board ID depending on the product type setting
+ UCHAR ucMiscInfo1; // Tuner voltage (1:0), HW teletext support (3:2), FM audio decoder (5:4), reserved (6), audio scrambling (7)
+ UCHAR ucMiscInfo2; // I2S input config (0), I2S output config (1), I2S audio chip (4:2), SPDIF output config (5), reserved (7:6)
+ UCHAR ucMiscInfo3; // Video decoder type (3:0), video in standard/crystal (7:4)
+ UCHAR ucMiscInfo4; // Video decoder host config (2:0), reserved (7:3)
+ UCHAR ucVideoInput0Info; // Video Input 0 type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
+ UCHAR ucVideoInput1Info; // Video Input 1 type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
+ UCHAR ucVideoInput2Info; // Video Input 2 type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
+ UCHAR ucVideoInput3Info; // Video Input 3 type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
+ UCHAR ucVideoInput4Info; // Video Input 4 type (1:0), F/B setting (2), physical connector ID (5:3), reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: BIOS image is not posted, =1: BIOS image is posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended
Desktop is not supported, =1: Extended Desktop is supported; +// Others: Reserved +#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001 +#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002 +#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004 +#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable. +#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable. +#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020 +#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040 +#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080 +#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100 +#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00 +#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000 +#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000 +#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip +#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip + + +#ifndef _H2INC + +//Please don't add or expand this bitfield structure below, this one will retire soon.! +typedef struct _ATOM_FIRMWARE_CAPABILITY +{ +#if ATOM_BIG_ENDIAN + USHORT Reserved:1; + USHORT SCL2Redefined:1; + USHORT PostWithoutModeSet:1; + USHORT HyperMemory_Size:4; + USHORT HyperMemory_Support:1; + USHORT PPMode_Assigned:1; + USHORT WMI_SUPPORT:1; + USHORT GPUControlsBL:1; + USHORT EngineClockSS_Support:1; + USHORT MemoryClockSS_Support:1; + USHORT ExtendedDesktopSupport:1; + USHORT DualCRTC_Support:1; + USHORT FirmwarePosted:1; +#else + USHORT FirmwarePosted:1; + USHORT DualCRTC_Support:1; + USHORT ExtendedDesktopSupport:1; + USHORT MemoryClockSS_Support:1; + USHORT EngineClockSS_Support:1; + USHORT GPUControlsBL:1; + USHORT WMI_SUPPORT:1; + USHORT PPMode_Assigned:1; + USHORT HyperMemory_Support:1; + USHORT HyperMemory_Size:4; + USHORT PostWithoutModeSet:1; + USHORT SCL2Redefined:1; + USHORT Reserved:1; +#endif +}ATOM_FIRMWARE_CAPABILITY; + +typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS +{ + ATOM_FIRMWARE_CAPABILITY sbfAccess; + USHORT susAccess; +}ATOM_FIRMWARE_CAPABILITY_ACCESS; + +#else + +typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS +{ + USHORT susAccess; +}ATOM_FIRMWARE_CAPABILITY_ACCESS; + +#endif + +typedef struct _ATOM_FIRMWARE_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucPadding[3]; //Don't use them + ULONG aulReservedForBIOS[3]; //Don't use them + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!! + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO; + +typedef struct _ATOM_FIRMWARE_INFO_V1_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + UCHAR ucPadding[2]; //Don't use them + ULONG aulReservedForBIOS[2]; //Don't use them + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_2; + +typedef struct _ATOM_FIRMWARE_INFO_V1_3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + UCHAR ucPadding[2]; //Don't use them + ULONG aulReservedForBIOS; //Don't use them + ULONG ul3DAccelerationEngineClock;//In 10Khz unit + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_3; + +typedef struct _ATOM_FIRMWARE_INFO_V1_4 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ul3DAccelerationEngineClock;//In 10Khz unit + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_4; + +//the structure below to be used from Cypress +typedef struct _ATOM_FIRMWARE_INFO_V2_1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulReserved1; + ULONG ulReserved2; + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock + ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit + UCHAR ucReserved1; //Was ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ulReserved4; //Was ulAsicMaximumVoltage + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usCoreReferenceClock; //In 10Khz unit + USHORT usMemoryReferenceClock; //In 10Khz unit + USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock + UCHAR ucMemoryModule_ID; //Indicate what is the board design + UCHAR ucReserved4[3]; + +}ATOM_FIRMWARE_INFO_V2_1; + +//the structure below to be used from NI +//ucTableFormatRevision=2 +//ucTableContentRevision=2 + +typedef struct _PRODUCT_BRANDING +{ + UCHAR ucEMBEDDED_CAP:2; // Bit[1:0] Embedded feature level + UCHAR ucReserved:2; // Bit[3:2] Reserved + UCHAR ucBRANDING_ID:4; // Bit[7:4] Branding ID +}PRODUCT_BRANDING; + +typedef struct _ATOM_FIRMWARE_INFO_V2_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulSPLL_OutputFreq; //In 10Khz unit + ULONG ulGPUPLL_OutputFreq; //In 10Khz unit + ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* + ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ? + ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage. 
+ UCHAR ucReserved3; //Was ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ulReserved4; //Was ulAsicMaximumVoltage + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + UCHAR ucRemoteDisplayConfig; + UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input + ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input + ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output + USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usCoreReferenceClock; //In 10Khz unit + USHORT usMemoryReferenceClock; //In 10Khz unit + USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock + UCHAR ucMemoryModule_ID; //Indicate what is the board design + UCHAR ucCoolingSolution_ID; //0: Air cooling; 1: Liquid cooling ... [COOLING_SOLUTION] + PRODUCT_BRANDING ucProductBranding; // Bit[7:4]ucBRANDING_ID: Branding ID, Bit[3:2]ucReserved: Reserved, Bit[1:0]ucEMBEDDED_CAP: Embedded feature level. + UCHAR ucReserved9; + USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + USHORT usBootUpVDDGFXVoltage; //In unit of mv; + ULONG ulReserved10[3]; // New added comparing to previous version +}ATOM_FIRMWARE_INFO_V2_2; + +#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2 + + +// definition of ucRemoteDisplayConfig +#define REMOTE_DISPLAY_DISABLE 0x00 +#define REMOTE_DISPLAY_ENABLE 0x01 + +/****************************************************************************/ +// Structures used in IntegratedSystemInfoTable +/****************************************************************************/ +#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2 +#define IGP_CAP_FLAG_AC_CARD 0x4 +#define IGP_CAP_FLAG_SDVO_CARD 0x8 +#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10 + +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulBootUpMemoryClock; //in 10kHz unit + ULONG ulMaxSystemMemoryClock; //in 10kHz unit + ULONG ulMinSystemMemoryClock; //in 10kHz unit + UCHAR ucNumberOfCyclesInPeriodHi; + UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID. 
+ USHORT usReserved1; + USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage + USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage + ULONG ulReserved[2]; + + USHORT usFSBClock; //In MHz unit + USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable + //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card + //Bit[4]==1: P/2 mode, ==0: P/1 mode + USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal + USHORT usK8MemoryClock; //in MHz unit + USHORT usK8SyncStartDelay; //in 0.01 us unit + USHORT usK8DataReturnTime; //in 0.01 us unit + UCHAR ucMaxNBVoltage; + UCHAR ucMinNBVoltage; + UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved + UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod + UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime + UCHAR ucHTLinkWidth; //16 bit vs. 8 bit + UCHAR ucMaxNBVoltageHigh; + UCHAR ucMinNBVoltageHigh; +}ATOM_INTEGRATED_SYSTEM_INFO; + +/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO +ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock + For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock +ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 + For AMD IGP,for now this can be 0 +ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 + For AMD IGP,for now this can be 0 + +usFSBClock: For Intel IGP,it's FSB Freq + For AMD IGP,it's HT Link Speed + +usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200 +usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation +usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation + +VC:Voltage Control +ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. +ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all. + +ucNumberOfCyclesInPeriod: Indicates how many cycles when PWM duty is 100%. Low 8 bits of the value. +ucNumberOfCyclesInPeriodHi: Indicates how many cycles when PWM duty is 100%. High 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it 0 + +ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage. Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. +ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage. Set this one to 0x00 if VC without PWM or no VC at all. + + +usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all. +usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
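+ +As an illustrative sketch only (hypothetical snippet, not part of this header), a driver would assemble the full 16-bit values from the low/high byte pairs described above, masking off the inverter flag carried in bit [7] of ucNumberOfCyclesInPeriodHi: + + USHORT usMaxVoltagePWM = ((USHORT)ucMaxNBVoltageHigh << 8) | ucMaxNBVoltage; // low/high split per the entries above + USHORT usCyclesInPeriod = ((USHORT)(ucNumberOfCyclesInPeriodHi & 0x7F) << 8) | ucNumberOfCyclesInPeriod; + UCHAR ucPWM_Inverted = (ucNumberOfCyclesInPeriodHi & 0x80) >> 7; // bit [7] flags an inverter on the PWM output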
+*/ + + +/* +The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST; +Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. +The reservation is large enough that we should never need to change table revisions. Whenever needed, a GPU SW component can use the reserved portion for new data entries. + +SW components can access the IGP system info structure in the same way as before +*/ + + +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulReserved1[2]; //must be 0x0 for the reserved + ULONG ulBootUpUMAClock; //in 10kHz unit + ULONG ulBootUpSidePortClock; //in 10kHz unit + ULONG ulMinSidePortClock; //in 10kHz unit + ULONG ulReserved2[6]; //must be 0x0 for the reserved + ULONG ulSystemConfig; //see explanation below + ULONG ulBootUpReqDisplayVector; + ULONG ulOtherDisplayMisc; + ULONG ulDDISlot1Config; + ULONG ulDDISlot2Config; + UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved + UCHAR ucUMAChannelNumber; + UCHAR ucDockingPinBit; + UCHAR ucDockingPinPolarity; + ULONG ulDockingPinCFGInfo; + ULONG ulCPUCapInfo; + USHORT usNumberOfCyclesInPeriod; + USHORT usMaxNBVoltage; + USHORT usMinNBVoltage; + USHORT usBootUpNBVoltage; + ULONG ulHTLinkFreq; //in 10Khz + USHORT usMinHTLinkWidth; + USHORT usMaxHTLinkWidth; + USHORT usUMASyncStartDelay; + USHORT usUMADataReturnTime; + USHORT usLinkStatusZeroTime; + USHORT usDACEfuse; //for storing bandgap value (for RS880 only) + ULONG ulHighVoltageHTLinkFreq; // in 10Khz + ULONG ulLowVoltageHTLinkFreq; // in 10Khz + USHORT usMaxUpStreamHTLinkWidth; + USHORT usMaxDownStreamHTLinkWidth; + USHORT usMinUpStreamHTLinkWidth; + USHORT usMinDownStreamHTLinkWidth; + USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW. + USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us. + ULONG ulReserved3[96]; //must be 0x0 +}ATOM_INTEGRATED_SYSTEM_INFO_V2; + +/* +ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; +ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present +ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present; this could be equal to or less than the maximum supported Sideport memory clock + +ulSystemConfig: +Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; +Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state will be used. + =0: system boots up at driver control state. Power state depends on PowerPlay table. +Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. +Bit[3]=1: Only one power state (Performance) will be supported. + =0: Multiple power states supported from PowerPlay table. +Bit[4]=1: CLMC is supported and enabled on current system. + =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface. +Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. + =0: CDLW is disabled. In the CLMC enabled case, Min HT width will be set equal to Max HT width. In the CLMC disabled case, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling requests will be ignored. + =0: Voltage settings are determined by powerplay table. +Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue. + =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. +Bit[8]=1: CDLF is supported and enabled on current system. + =0: CDLF is not supported or enabled on current system. +Bit[9]=1: DLL Shut Down feature is enabled on current system. + =0: DLL Shut Down feature is not enabled or supported on current system. + +ulBootUpReqDisplayVector: This dword is a bit vector indicating what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. + +ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; + [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; + +ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). + [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) + [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12) + When a DDI connector is not "paired" (meaning the two connections are mutually exclusive: on chassis or docking, only one of them can be connected at one time) + in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: + if one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2. + + [15:8] - Lane configuration attribute; + [23:16]- Connector type, possible value: + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D + CONNECTOR_OBJECT_ID_HDMI_TYPE_A + CONNECTOR_OBJECT_ID_DISPLAYPORT + CONNECTOR_OBJECT_ID_eDP + [31:24]- Reserved + +ulDDISlot2Config: Same as Slot1. +ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC. +For IGP, Hypermemory is the only memory type shown in CCC. + +ucUMAChannelNumber: how many channels for the UMA; + +ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin +ucDockingPinBit: which bit in this register to read the pin status; +ucDockingPinPolarity:Polarity of the pin when docked; + +ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0 + +usNumberOfCyclesInPeriod:Indicates how many cycles when PWM duty is 100%. + +usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. +usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+ GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 + PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 + GPU SW doesn't control mode: usMaxNBVoltage & usMinNBVoltage=0 and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is a don't-care + +usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. + + +ulHTLinkFreq: Bootup HT link Frequency in 10Khz. +usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. + If CDLW enabled, both upstream and downstream width should be the same during bootup. +usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. + If CDLW enabled, both upstream and downstream width should be the same during bootup. + +usUMASyncStartDelay: Memory access latency, required for watermark calculation +usUMADataReturnTime: Memory access latency, required for watermark calculation +usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us +for Griffin or Greyhound. SBIOS needs to convert to actual time by: + if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) + if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) + if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us) + if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) + +ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0. + This must be less than or equal to ulHTLinkFreq(bootup frequency). +ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. + This must be less than or equal to ulHighVoltageHTLinkFreq. + +usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now. +usMaxDownStreamHTLinkWidth: same as above. +usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now. +usMinDownStreamHTLinkWidth: same as above.
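+ +As a sketch only (hypothetical helper, not part of this header), the T0Ttime conversion table above can be written in C, returning the result in the same 0.01 us units used by usLinkStatusZeroTime: + + static USHORT T0TtimeToLinkStatusZeroTime(UCHAR ucT0Ttime) + { + static const USHORT usStep[4] = {10, 50, 200, 2000}; // 0.1us, 0.5us, 2.0us, 20us, expressed in 0.01us units + return (USHORT)((ucT0Ttime & 0x0F) * usStep[(ucT0Ttime >> 4) & 0x03]); // 11b case assumes T0Ttime[3:0] <= 0xa per the table + }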
+*/ + +// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition +#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5 + +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code + +#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 +#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 +#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 +#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 +#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 +#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 +#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 +#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 +#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100 +#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200 + +#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF + +#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F +#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0 +#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01 +#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02 +#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04 +#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08 + +#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00 +#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100 +#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01 + +#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 + +// IntegratedSystemInfoTable new Rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge + ULONG ulBootUpUMAClock; //in 10kHz unit + ULONG ulReserved1[8]; //must be 0x0 for the reserved + ULONG ulBootUpReqDisplayVector; + ULONG ulOtherDisplayMisc; + ULONG ulReserved2[4]; //must be 0x0 for the reserved + ULONG ulSystemConfig; //TBD + ULONG ulCPUCapInfo; //TBD + USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; + USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; + USHORT usBootUpNBVoltage; //boot up NB voltage + UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD + UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD + ULONG ulReserved3[4]; //must be 0x0 for the reserved + ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition + ULONG ulDDISlot2Config; + ULONG ulDDISlot3Config; + ULONG ulDDISlot4Config; + ULONG ulReserved4[4]; //must be 0x0 for the reserved + UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved + UCHAR ucUMAChannelNumber; + USHORT usReserved; + ULONG ulReserved5[4]; //must be 0x0 for the reserved + ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default + ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback + ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications + ULONG ulReserved6[61]; //must be 0x0 +}ATOM_INTEGRATED_SYSTEM_INFO_V5; + + + +/****************************************************************************/ +// Structure used in GPUVirtualizationInfoTable +/****************************************************************************/ +typedef struct _ATOM_GPU_VIRTUALIZATION_INFO_V2_1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulMCUcodeRomStartAddr; + ULONG ulMCUcodeLength; + ULONG ulSMCUcodeRomStartAddr; + ULONG ulSMCUcodeLength; + ULONG ulRLCVUcodeRomStartAddr; + ULONG ulRLCVUcodeLength; + ULONG ulTOCUcodeStartAddr; + ULONG ulTOCUcodeLength; + ULONG ulSMCPatchTableStartAddr; + ULONG ulSmcPatchTableLength; + ULONG ulSystemFlag; +}ATOM_GPU_VIRTUALIZATION_INFO_V2_1; + + +#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 +#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 +#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 +#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003 +#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004 +#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005 +#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006 +#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007 +#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008 +#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009 +#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A +#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B +#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C +#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D + +// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable +#define ASIC_INT_DAC1_ENCODER_ID 0x00 +#define ASIC_INT_TV_ENCODER_ID 0x02 +#define ASIC_INT_DIG1_ENCODER_ID 0x03 +#define ASIC_INT_DAC2_ENCODER_ID 0x04 +#define ASIC_EXT_TV_ENCODER_ID 0x06 +#define ASIC_INT_DVO_ENCODER_ID 0x07 +#define ASIC_INT_DIG2_ENCODER_ID 0x09 +#define ASIC_EXT_DIG_ENCODER_ID 0x05 +#define ASIC_EXT_DIG2_ENCODER_ID 0x08 +#define ASIC_INT_DIG3_ENCODER_ID 0x0a +#define ASIC_INT_DIG4_ENCODER_ID 0x0b +#define ASIC_INT_DIG5_ENCODER_ID 0x0c +#define ASIC_INT_DIG6_ENCODER_ID 0x0d +#define 
ASIC_INT_DIG7_ENCODER_ID 0x0e + +//define Encoder attribute +#define ATOM_ANALOG_ENCODER 0 +#define ATOM_DIGITAL_ENCODER 1 +#define ATOM_DP_ENCODER 2 + +#define ATOM_ENCODER_ENUM_MASK 0x70 +#define ATOM_ENCODER_ENUM_ID1 0x00 +#define ATOM_ENCODER_ENUM_ID2 0x10 +#define ATOM_ENCODER_ENUM_ID3 0x20 +#define ATOM_ENCODER_ENUM_ID4 0x30 +#define ATOM_ENCODER_ENUM_ID5 0x40 +#define ATOM_ENCODER_ENUM_ID6 0x50 + +#define ATOM_DEVICE_CRT1_INDEX 0x00000000 +#define ATOM_DEVICE_LCD1_INDEX 0x00000001 +#define ATOM_DEVICE_TV1_INDEX 0x00000002 +#define ATOM_DEVICE_DFP1_INDEX 0x00000003 +#define ATOM_DEVICE_CRT2_INDEX 0x00000004 +#define ATOM_DEVICE_LCD2_INDEX 0x00000005 +#define ATOM_DEVICE_DFP6_INDEX 0x00000006 +#define ATOM_DEVICE_DFP2_INDEX 0x00000007 +#define ATOM_DEVICE_CV_INDEX 0x00000008 +#define ATOM_DEVICE_DFP3_INDEX 0x00000009 +#define ATOM_DEVICE_DFP4_INDEX 0x0000000A +#define ATOM_DEVICE_DFP5_INDEX 0x0000000B + +#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C +#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D +#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E +#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F +#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) +#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO +#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 ) + +#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) + +#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX ) +#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX ) +#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX ) +#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX ) +#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX ) +#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX ) +#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX ) +#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX ) +#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX ) +#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX ) +#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) +#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX ) + + +#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) +#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT) +#define ATOM_DEVICE_TV_SUPPORT ATOM_DEVICE_TV1_SUPPORT +#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT) + +#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 +#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 +#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001 +#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002 +#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003 +#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004 +#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005 +#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006 +#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007 +#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008 +#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009 +#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A +#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B +#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E +#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F + + +#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F +#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 +#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 +#define 
ATOM_DEVICE_DAC_INFO_DACA 0x00000001 +#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002 +#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003 + +#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000 + +#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F +#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000 + +#define ATOM_DEVICE_I2C_ID_MASK 0x00000070 +#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 +#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 +#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 +#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600 +#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690 + +#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 +#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 +#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 +#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 + +// usDeviceSupport: +// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported +// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported +// Bit 2 = 0 - no TV1 support= 1- TV1 is supported +// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported +// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported +// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported +// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported +// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported +// Bit 8 = 0 - no CV support= 1- CV is supported +// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported +// Bit 10= 0 - no DFP4 support= 1- DFP4 is supported +// Bit 11= 0 - no DFP5 support= 1- DFP5 is supported +// +// + +/****************************************************************************/ +// Structure used in MclkSS_InfoTable +/****************************************************************************/ +// ucI2C_ConfigID +// [7:0] - I2C LINE Associate ID +// = 0 - no I2C +// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) +// = 0, [6:0]=SW assisted I2C ID +// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use +// = 2, HW engine for Multimedia use +// = 3-7 Reserved for future I2C engines +// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C + +typedef struct _ATOM_I2C_ID_CONFIG +{ +#if ATOM_BIG_ENDIAN + UCHAR bfHW_Capable:1; + UCHAR bfHW_EngineID:3; + UCHAR bfI2C_LineMux:4; +#else + UCHAR bfI2C_LineMux:4; + UCHAR bfHW_EngineID:3; + UCHAR bfHW_Capable:1; +#endif +}ATOM_I2C_ID_CONFIG; + +typedef union _ATOM_I2C_ID_CONFIG_ACCESS +{ + ATOM_I2C_ID_CONFIG sbfAccess; + UCHAR ucAccess; +}ATOM_I2C_ID_CONFIG_ACCESS; + + +/****************************************************************************/ +// Structure used in GPIO_I2C_InfoTable +/****************************************************************************/ +typedef struct _ATOM_GPIO_I2C_ASSIGMENT +{ + USHORT usClkMaskRegisterIndex; + USHORT usClkEnRegisterIndex; + USHORT usClkY_RegisterIndex; + USHORT usClkA_RegisterIndex; + USHORT usDataMaskRegisterIndex; + USHORT usDataEnRegisterIndex; + USHORT usDataY_RegisterIndex; + USHORT usDataA_RegisterIndex; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; + UCHAR ucClkMaskShift; + UCHAR ucClkEnShift; + UCHAR ucClkY_Shift; + UCHAR ucClkA_Shift; + UCHAR ucDataMaskShift; + UCHAR ucDataEnShift; + UCHAR ucDataY_Shift; + UCHAR ucDataA_Shift; + UCHAR ucReserved1; + UCHAR ucReserved2; +}ATOM_GPIO_I2C_ASSIGMENT; + +typedef struct _ATOM_GPIO_I2C_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; +}ATOM_GPIO_I2C_INFO; + +/****************************************************************************/ +// 
Common Structure used in other structures +/****************************************************************************/ + +#ifndef _H2INC + +//Please don't add or expand this bitfield structure below, this one will retire soon.! +typedef struct _ATOM_MODE_MISC_INFO +{ +#if ATOM_BIG_ENDIAN + USHORT Reserved:6; + USHORT RGB888:1; + USHORT DoubleClock:1; + USHORT Interlace:1; + USHORT CompositeSync:1; + USHORT V_ReplicationBy2:1; + USHORT H_ReplicationBy2:1; + USHORT VerticalCutOff:1; + USHORT VSyncPolarity:1; //0=Active High, 1=Active Low + USHORT HSyncPolarity:1; //0=Active High, 1=Active Low + USHORT HorizontalCutOff:1; +#else + USHORT HorizontalCutOff:1; + USHORT HSyncPolarity:1; //0=Active High, 1=Active Low + USHORT VSyncPolarity:1; //0=Active High, 1=Active Low + USHORT VerticalCutOff:1; + USHORT H_ReplicationBy2:1; + USHORT V_ReplicationBy2:1; + USHORT CompositeSync:1; + USHORT Interlace:1; + USHORT DoubleClock:1; + USHORT RGB888:1; + USHORT Reserved:6; +#endif +}ATOM_MODE_MISC_INFO; + +typedef union _ATOM_MODE_MISC_INFO_ACCESS +{ + ATOM_MODE_MISC_INFO sbfAccess; + USHORT usAccess; +}ATOM_MODE_MISC_INFO_ACCESS; + +#else + +typedef union _ATOM_MODE_MISC_INFO_ACCESS +{ + USHORT usAccess; +}ATOM_MODE_MISC_INFO_ACCESS; + +#endif + +// usModeMiscInfo- +#define ATOM_H_CUTOFF 0x01 +#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low +#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low +#define ATOM_V_CUTOFF 0x08 +#define ATOM_H_REPLICATIONBY2 0x10 +#define ATOM_V_REPLICATIONBY2 0x20 +#define ATOM_COMPOSITESYNC 0x40 +#define ATOM_INTERLACE 0x80 +#define ATOM_DOUBLE_CLOCK_MODE 0x100 +#define ATOM_RGB888_MODE 0x200 + +//usRefreshRate- +#define ATOM_REFRESH_43 43 +#define ATOM_REFRESH_47 47 +#define ATOM_REFRESH_56 56 +#define ATOM_REFRESH_60 60 +#define ATOM_REFRESH_65 65 +#define ATOM_REFRESH_70 70 +#define ATOM_REFRESH_72 72 +#define ATOM_REFRESH_75 75 +#define ATOM_REFRESH_85 85 + +// ATOM_MODE_TIMING data are exactly the same as VESA timing data. +// Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
+// +// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK +// = EDID_HA + EDID_HBL +// VESA_HDISP = VESA_ACTIVE = EDID_HA +// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH +// = EDID_HA + EDID_HSO +// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW +// VESA_BORDER = EDID_BORDER + + +/****************************************************************************/ +// Structure used in SetCRTC_UsingDTDTimingTable +/****************************************************************************/ +typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS +{ + USHORT usH_Size; + USHORT usH_Blanking_Time; + USHORT usV_Size; + USHORT usV_Blanking_Time; + USHORT usH_SyncOffset; + USHORT usH_SyncWidth; + USHORT usV_SyncOffset; + USHORT usV_SyncWidth; + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucH_Border; // From DFP EDID + UCHAR ucV_Border; + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucPadding[3]; +}SET_CRTC_USING_DTD_TIMING_PARAMETERS; + +/****************************************************************************/ +// Structure used in SetCRTC_TimingTable +/****************************************************************************/ +typedef struct _SET_CRTC_TIMING_PARAMETERS +{ + USHORT usH_Total; // horizontal total + USHORT usH_Disp; // horizontal display + USHORT usH_SyncStart; // horozontal Sync start + USHORT usH_SyncWidth; // horizontal Sync width + USHORT usV_Total; // vertical total + USHORT usV_Disp; // vertical display + USHORT usV_SyncStart; // vertical Sync start + USHORT usV_SyncWidth; // vertical Sync width + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucOverscanRight; // right + UCHAR ucOverscanLeft; // left + UCHAR ucOverscanBottom; // bottom + UCHAR ucOverscanTop; // top + UCHAR ucReserved; +}SET_CRTC_TIMING_PARAMETERS; +#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS + + +/****************************************************************************/ +// Structure used in StandardVESA_TimingTable +// AnalogTV_InfoTable +// ComponentVideoInfoTable +/****************************************************************************/ +typedef struct _ATOM_MODE_TIMING +{ + USHORT usCRTC_H_Total; + USHORT usCRTC_H_Disp; + USHORT usCRTC_H_SyncStart; + USHORT usCRTC_H_SyncWidth; + USHORT usCRTC_V_Total; + USHORT usCRTC_V_Disp; + USHORT usCRTC_V_SyncStart; + USHORT usCRTC_V_SyncWidth; + USHORT usPixelClock; //in 10Khz unit + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + USHORT usCRTC_OverscanRight; + USHORT usCRTC_OverscanLeft; + USHORT usCRTC_OverscanBottom; + USHORT usCRTC_OverscanTop; + USHORT usReserve; + UCHAR ucInternalModeNumber; + UCHAR ucRefreshRate; +}ATOM_MODE_TIMING; + +typedef struct _ATOM_DTD_FORMAT +{ + USHORT usPixClk; + USHORT usHActive; + USHORT usHBlanking_Time; + USHORT usVActive; + USHORT usVBlanking_Time; + USHORT usHSyncOffset; + USHORT usHSyncWidth; + USHORT usVSyncOffset; + USHORT usVSyncWidth; + USHORT usImageHSize; + USHORT usImageVSize; + UCHAR ucHBorder; + UCHAR ucVBorder; + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucInternalModeNumber; + UCHAR ucRefreshRate; +}ATOM_DTD_FORMAT; + +/****************************************************************************/ +// Structure used in LVDS_InfoTable +// * Need a document to describe this table +/****************************************************************************/ +#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 +#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 +#define 
SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 +#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 +#define SUPPORTED_LCD_REFRESHRATE_48Hz 0x0040 + +//ucTableFormatRevision=1 +//ucTableContentRevision=1 +typedef struct _ATOM_LVDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usModePatchTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + USHORT usOffDelayInMs; + UCHAR ucPowerSequenceDigOntoDEin10Ms; + UCHAR ucPowerSequenceDEtoBLOnin10Ms; + UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} + // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} + // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} + // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; +}ATOM_LVDS_INFO; + +//ucTableFormatRevision=1 +//ucTableContentRevision=2 +typedef struct _ATOM_LVDS_INFO_V12 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usExtInfoTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + USHORT usOffDelayInMs; + UCHAR ucPowerSequenceDigOntoDEin10Ms; + UCHAR ucPowerSequenceDEtoBLOnin10Ms; + UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} + // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} + // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} + // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; + USHORT usLCDVenderID; + USHORT usLCDProductID; + UCHAR ucLCDPanel_SpecialHandlingCap; + UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable + UCHAR ucReserved[2]; +}ATOM_LVDS_INFO_V12; + +//Definitions for ucLCDPanel_SpecialHandlingCap: + +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL +#define LCDPANEL_CAP_READ_EDID 0x1 + +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static +//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 +#define LCDPANEL_CAP_DRR_SUPPORTED 0x2 + +//Use this cap bit for a quick reference whether an embedded panel (LCD1 ) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP 0x4 + + +//Color Bit Depth definition in EDID V1.4 @BYTE 14h +//Bit 6 5 4 + // 0 0 0 - Color bit depth is undefined + // 0 0 1 - 6 Bits per Primary Color + // 0 1 0 - 8 Bits per Primary Color + // 0 1 1 - 10 Bits per Primary Color + // 1 0 0 - 12 Bits per Primary Color + // 1 0 1 - 14 Bits per Primary Color + // 1 1 0 - 16 Bits per Primary Color + // 1 1 1 - Reserved + +#define PANEL_COLOR_BIT_DEPTH_MASK 0x70 + +// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled} +#define PANEL_RANDOM_DITHER 0x80 +#define PANEL_RANDOM_DITHER_MASK 0x80 + +#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this + + +typedef struct _ATOM_LCD_REFRESH_RATE_SUPPORT +{ + UCHAR ucSupportedRefreshRate; + UCHAR ucMinRefreshRateForDRR; +}ATOM_LCD_REFRESH_RATE_SUPPORT; + +/****************************************************************************/ +// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12 +// ASIC Families: NI +// ucTableFormatRevision=1 +// ucTableContentRevision=3 +/****************************************************************************/ +typedef struct _ATOM_LCD_INFO_V13 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usExtInfoTableOffset; + union + { + USHORT usSupportedRefreshRate; + ATOM_LCD_REFRESH_RATE_SUPPORT sRefreshRateSupport; + }; + ULONG ulReserved0; + UCHAR ucLCD_Misc; // Reorganized in V13 + // Bit0: {=0:single, =1:dual}, + // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB}, + // Bit3:2: {Grey level} + // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) + // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it? + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; + USHORT usLCDVenderID; + USHORT usLCDProductID; + UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13 + // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own + // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED + // Bit2: a quick reference whether an embadded panel (LCD1 ) is LVDS (0) or eDP (1) + // Bit7-3: Reserved + UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable + USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13 + + UCHAR ucPowerSequenceDIGONtoDE_in4Ms; + UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms; + UCHAR ucPowerSequenceDEtoDIGON_in4Ms; + + UCHAR ucOffDelay_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms; + UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms; + UCHAR ucReserved1; + + UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh + UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h + UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h + UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h + + USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode. 
+ UCHAR uceDPToLVDSRxId; + UCHAR ucLcdReservd; + ULONG ulReserved[2]; +}ATOM_LCD_INFO_V13; + +#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13 + +//Definitions for ucLCD_Misc +#define ATOM_PANEL_MISC_V13_DUAL 0x00000001 +#define ATOM_PANEL_MISC_V13_FPDI 0x00000002 +#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C +#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2 +#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70 +#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10 +#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20 + +//Color Bit Depth definition in EDID V1.4 @BYTE 14h +//Bit 6 5 4 + // 0 0 0 - Color bit depth is undefined + // 0 0 1 - 6 Bits per Primary Color + // 0 1 0 - 8 Bits per Primary Color + // 0 1 1 - 10 Bits per Primary Color + // 1 0 0 - 12 Bits per Primary Color + // 1 0 1 - 14 Bits per Primary Color + // 1 1 0 - 16 Bits per Primary Color + // 1 1 1 - Reserved + +//Definitions for ucLCDPanel_SpecialHandlingCap: + +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL +#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change compared to previous version + +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static +//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 +#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change compared to previous version + +//Use this cap bit for a quick reference whether an embedded panel (LCD1 ) is LVDS or eDP. +#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change compared to previous version + +//uceDPToLVDSRxId +#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip +#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init +#define eDP_TO_LVDS_RT_ID 0x02 // RT translator which requires AMD SW init + +typedef struct _ATOM_PATCH_RECORD_MODE +{ + UCHAR ucRecordType; + USHORT usHDisp; + USHORT usVDisp; +}ATOM_PATCH_RECORD_MODE; + +typedef struct _ATOM_LCD_RTS_RECORD +{ + UCHAR ucRecordType; + UCHAR ucRTSValue; +}ATOM_LCD_RTS_RECORD; + +//!! If the record below exists, it should always be the first record for easy use in command table!!! +// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead. +typedef struct _ATOM_LCD_MODE_CONTROL_CAP +{ + UCHAR ucRecordType; + USHORT usLCDCap; +}ATOM_LCD_MODE_CONTROL_CAP; + +#define LCD_MODE_CAP_BL_OFF 1 +#define LCD_MODE_CAP_CRTC_OFF 2 +#define LCD_MODE_CAP_PANEL_OFF 4 + + +typedef struct _ATOM_FAKE_EDID_PATCH_RECORD +{ + UCHAR ucRecordType; + UCHAR ucFakeEDIDLength; // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128 + UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD; + +typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD +{ + UCHAR ucRecordType; + USHORT usHSize; + USHORT usVSize; +}ATOM_PANEL_RESOLUTION_PATCH_RECORD; + +#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 +#define LCD_RTS_RECORD_TYPE 2 +#define LCD_CAP_RECORD_TYPE 3 +#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4 +#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5 +#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6 +#define ATOM_RECORD_END_TYPE 0xFF + +/****************************Spread Spectrum Info Table Definitions **********************/ + +//ucTableFormatRevision=1 +//ucTableContentRevision=2 +typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 iternal PPLL SS Others:TBD + UCHAR ucSS_Step; + UCHAR ucSS_Delay; + UCHAR ucSS_Id; + UCHAR ucRecommendedRef_Div; + UCHAR ucSS_Range; //it was reserved for V11 +}ATOM_SPREAD_SPECTRUM_ASSIGNMENT; + +#define ATOM_MAX_SS_ENTRY 16 +#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well. +#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable. +#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz +#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz + + + +#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 +#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 +#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001 +#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001 +#define ATOM_INTERNAL_SS_MASK 0x00000000 +#define ATOM_EXTERNAL_SS_MASK 0x00000002 +#define EXEC_SS_STEP_SIZE_SHIFT 2 +#define EXEC_SS_DELAY_SHIFT 4 +#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 + +typedef struct _ATOM_SPREAD_SPECTRUM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; +}ATOM_SPREAD_SPECTRUM_INFO; + + +/****************************************************************************/ +// Structure used in AnalogTV_InfoTable (Top level) +/****************************************************************************/ +//ucTVBootUpDefaultStd definiton: + +//ATOM_TV_NTSC 1 +//ATOM_TV_NTSCJ 2 +//ATOM_TV_PAL 3 +//ATOM_TV_PALM 4 +//ATOM_TV_PALCN 5 +//ATOM_TV_PALN 6 +//ATOM_TV_PAL60 7 +//ATOM_TV_SECAM 8 + +//ucTVSuppportedStd definition: +#define NTSC_SUPPORT 0x1 +#define NTSCJ_SUPPORT 0x2 + +#define PAL_SUPPORT 0x4 +#define PALM_SUPPORT 0x8 +#define PALCN_SUPPORT 0x10 +#define PALN_SUPPORT 0x20 +#define PAL60_SUPPORT 0x40 +#define SECAM_SUPPORT 0x80 + +#define MAX_SUPPORTED_TV_TIMING 2 + +typedef struct _ATOM_ANALOG_TV_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucTV_SuppportedStandard; + UCHAR ucTV_BootUpDefaultStandard; + UCHAR ucExt_TV_ASIC_ID; + UCHAR ucExt_TV_ASIC_SlaveAddr; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; +}ATOM_ANALOG_TV_INFO; + +typedef struct _ATOM_DPCD_INFO +{ + UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1 + UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane + UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). 
Bit 7 = ENHANCED_FRAME_CAP + UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec) +}ATOM_DPCD_INFO; + +#define ATOM_DPCD_MAX_LANE_MASK 0x1F + +/**************************************************************************/ +// VRAM usage and their defintions + +// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. +// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. +// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! +// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR +// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX + +// Moved VESA_MEMORY_IN_64K_BLOCK definition to "AtomConfig.h" so that it can be redefined in design (SKU). +//#ifndef VESA_MEMORY_IN_64K_BLOCK +//#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!) +//#endif + +#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes +#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes +#define ATOM_HWICON_INFOTABLE_SIZE 32 +#define MAX_DTD_MODE_IN_VRAM 6 +#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) +#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) +//20 bytes for Encoder Type and DPCD in STD EDID area +#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) +#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) + +#define ATOM_HWICON1_SURFACE_ADDR 0 +#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) +#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) +#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE) +#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define 
ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + +#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) +#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 + +//The size below is in Kb! +#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) + +#define ATOM_VRAM_RESERVE_V2_SIZE 32 + +#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L +#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 +#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 +#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 + +/***********************************************************************************/ +// Structure used in VRAM_UsageByFirmwareTable +// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm +// at run time. +// Note2: From RV770, the memory is more than 32bit addressable, so we will change +// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains +// exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware +// (in offset to start of memory address) is KB aligned instead of byte aligned. +// Note3: +/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged +constant across VGA or non-VGA adapters; +for CAIL, the size of the FB access area is known; the only thing missing is the Offset of the FB Access area, so we can have: + +If (ulStartAddrUsedByFirmware!=0) +FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB; +Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose +else //Non VGA case + if (FB_Size<=2Gb) + FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB; + else + FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB + +CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non VGA case.*/ + +/***********************************************************************************/ +#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 + +typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO +{ + ULONG ulStartAddrUsedByFirmware; + USHORT usFirmwareUseInKb; + USHORT usReserved; +}ATOM_FIRMWARE_VRAM_RESERVE_INFO; + +typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; +}ATOM_VRAM_USAGE_BY_FIRMWARE; + +// changed version to 1.5 to allow the driver to allocate the VRAM area for command table access. +typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 +{ + ULONG ulStartAddrUsedByFirmware; + USHORT usFirmwareUseInKb; + USHORT usFBUsedByDrvInKb; +}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5; + +typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; +}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5; + +/****************************************************************************/ +// Structure used in GPIO_Pin_LUTTable +/****************************************************************************/ +typedef struct _ATOM_GPIO_PIN_ASSIGNMENT +{ + USHORT usGpioPin_AIndex; + UCHAR ucGpioPinBitShift; + UCHAR ucGPIO_ID; +}ATOM_GPIO_PIN_ASSIGNMENT; + +//ucGPIO_ID pre-defined ids for multiple usage +// GPIO used to control PCIE_VDDC in certain SLT board +#define PCIE_VDDC_CONTROL_GPIO_PINID 56 + +//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC switching feature is enabled +#define PP_AC_DC_SWITCH_GPIO_PINID 60 +//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enabled +#define VDDC_VRHOT_GPIO_PINID 61 +//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled +#define VDDC_PCC_GPIO_PINID 62 +// Only used on certain SLT/PA board to allow utility to cut Efuse.
+#define EFUSE_CUT_ENABLE_GPIO_PINID 63 +// ucGPIO=DRAM_SELF_REFRESH_GPIO_PINID is used for memory self refresh (ucGPIO=0, DRAM self-refresh; ucGPIO= +#define DRAM_SELF_REFRESH_GPIO_PINID 64 +// Thermal interrupt output->system thermal chip GPIO pin +#define THERMAL_INT_OUTPUT_GPIO_PINID 65 + + +typedef struct _ATOM_GPIO_PIN_LUT +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; +}ATOM_GPIO_PIN_LUT; + +/****************************************************************************/ +// Structure used in ComponentVideoInfoTable +/****************************************************************************/ +#define GPIO_PIN_ACTIVE_HIGH 0x1 +#define MAX_SUPPORTED_CV_STANDARDS 5 + +// definitions for ATOM_GPIO_INFO.ucSettings +#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0] +#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out +#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7] + +typedef struct _ATOM_GPIO_INFO +{ + USHORT usAOffset; + UCHAR ucSettings; + UCHAR ucReserved; +}ATOM_GPIO_INFO; + +// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) +#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 + +// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i +#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7]; +#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0] + +// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode +//Line 3 outputs 5V. +#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represents gpio 3 state for 16:9 +#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represents gpio 4 state for 16:9 +#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 + +//Line 3 outputs 2.2V +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represents gpio 3 state for 4:3 Letter box +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represents gpio 4 state for 4:3 Letter box +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 + +//Line 3 outputs 0V +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represents gpio 3 state for 4:3 +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represents gpio 4 state for 4:3 +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 + +#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0] + +#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7 + +//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks. +#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. +#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. + + +typedef struct _ATOM_COMPONENT_VIDEO_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMask_PinRegisterIndex; + USHORT usEN_PinRegisterIndex; + USHORT usY_PinRegisterIndex; + USHORT usA_PinRegisterIndex; + UCHAR ucBitShift; + UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low + ATOM_DTD_FORMAT sReserved; // must be zeroed out + UCHAR ucMiscInfo; + UCHAR uc480i; + UCHAR uc480p; + UCHAR uc720p; + UCHAR uc1080i; + UCHAR ucLetterBoxMode; + UCHAR ucReserved[3]; + UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector + ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +}ATOM_COMPONENT_VIDEO_INFO; + +//ucTableFormatRevision=2 +//ucTableContentRevision=1 +typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucMiscInfo; + UCHAR uc480i; + UCHAR uc480p; + UCHAR uc720p; + UCHAR uc1080i; + UCHAR ucReserved; + UCHAR ucLetterBoxMode; + UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector + ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +}ATOM_COMPONENT_VIDEO_INFO_V21; + +#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 + +/****************************************************************************/ +// Structure used in object_InfoTable +/****************************************************************************/ +typedef struct _ATOM_OBJECT_HEADER +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + USHORT usConnectorObjectTableOffset; + USHORT usRouterObjectTableOffset; + USHORT usEncoderObjectTableOffset; + USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. + USHORT usDisplayPathTableOffset; +}ATOM_OBJECT_HEADER; + +typedef struct _ATOM_OBJECT_HEADER_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + USHORT usConnectorObjectTableOffset; + USHORT usRouterObjectTableOffset; + USHORT usEncoderObjectTableOffset; + USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. + USHORT usDisplayPathTableOffset; + USHORT usMiscObjectTableOffset; +}ATOM_OBJECT_HEADER_V3; + + +typedef struct _ATOM_DISPLAY_OBJECT_PATH +{ + USHORT usDeviceTag; //supported device + USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH + USHORT usConnObjectId; //Connector Object ID + USHORT usGPUObjectId; //GPU ID + USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destined to the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT usDeviceTag;         //supported device
+  USHORT usSize;              //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT usConnObjectId;      //Connector Object ID
+  USHORT usGPUObjectId;       //GPU ID
+  USHORT usGraphicObjIds[2];  //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+  UCHAR ucNumOfDispPath;
+  UCHAR ucVersion;
+  UCHAR ucPadding[2];
+  ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+typedef struct _ATOM_OBJECT  //each object has this structure
+{
+  USHORT usObjectID;
+  USHORT usSrcDstTableOffset;
+  USHORT usRecordOffset;  //points to a list of records defined below
+  USHORT usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE  //Each of the four object table offsets above points to a list of objects that all have this structure
+{
+  UCHAR ucNumberOfObjects;
+  UCHAR ucPadding[3];
+  ATOM_OBJECT asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT  //usSrcDstTableOffset points to this structure
+{
+  UCHAR  ucNumberOfSrc;
+  USHORT usSrcObjectID[1];
+  UCHAR  ucNumberOfDst;
+  USHORT usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+
+//The two definition groups below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0  0
+#define EXT_HPDPIN_LUTINDEX_1  1
+#define EXT_HPDPIN_LUTINDEX_2  2
+#define EXT_HPDPIN_LUTINDEX_3  3
+#define EXT_HPDPIN_LUTINDEX_4  4
+#define EXT_HPDPIN_LUTINDEX_5  5
+#define EXT_HPDPIN_LUTINDEX_6  6
+#define EXT_HPDPIN_LUTINDEX_7  7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES  (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0  0
+#define EXT_AUXDDC_LUTINDEX_1  1
+#define EXT_AUXDDC_LUTINDEX_2  2
+#define EXT_AUXDDC_LUTINDEX_3  3
+#define EXT_AUXDDC_LUTINDEX_4  4
+#define EXT_AUXDDC_LUTINDEX_5  5
+#define EXT_AUXDDC_LUTINDEX_6  6
+#define EXT_AUXDDC_LUTINDEX_7  7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES  (EXT_AUXDDC_LUTINDEX_7+1)
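+
+// [Editor's note] Illustrative sketch only: because each
+// ATOM_DISPLAY_OBJECT_PATH carries a variable-length usGraphicObjIds[] array,
+// table entries must be advanced by usSize rather than sizeof(). A
+// hypothetical walker; a real driver would also apply le16_to_cpu() to the
+// USHORT fields:
+static inline void atom_walk_display_paths(ATOM_DISPLAY_OBJECT_PATH_TABLE *pTable)
+{
+   ATOM_DISPLAY_OBJECT_PATH *pPath = pTable->asDispPath;
+   UCHAR i;
+   for (i = 0; i < pTable->ucNumOfDispPath; i++) {
+      // pPath->usConnObjectId / pPath->usGraphicObjIds[] are valid here
+      pPath = (ATOM_DISPLAY_OBJECT_PATH *)((UCHAR *)pPath + pPath->usSize);
+   }
+}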
+
+//ucChannelMapping is defined as follows
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in the dual-link case, both links have to have the same mapping.
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
+typedef struct _EXT_DISPLAY_PATH
+{
+  USHORT usDeviceTag;         //A bit vector to show what devices are supported
+  USHORT usDeviceACPIEnum;    //16bit device ACPI id.
+  USHORT usDeviceConnector;   //A physical connector for displays to plug in, using object connector definitions
+  UCHAR  ucExtAUXDDCLutIndex; //An index into the external AUX/DDC channel LUT
+  UCHAR  ucExtHPDPINLutIndex; //An index into the external HPD pin LUT
+  USHORT usExtEncoderObjId;   //external encoder object id
+  union{
+    UCHAR ucChannelMapping;   // if ucChannelMapping=0, use the default one-to-one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING  asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR  ucChPNInvert;        // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
+  USHORT usCaps;
+  USHORT usReserved;
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID        16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH  7
+
+//usCaps
+#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE            0x01
+#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN          0x02
+#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204       0x04
+#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT  0x08
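+
+// [Editor's note] Illustrative sketch only: decoding the DP lane mapping
+// union above from the raw ucChannelMapping byte. ucChannelMapping == 0
+// means the default one-to-one mapping; otherwise each 2-bit field selects
+// the GPU TX pin feeding that connector lane. Helper name is hypothetical.
+static inline UCHAR atom_dp_lane_source(EXT_DISPLAY_PATH *pPath, UCHAR ucLane)
+{
+   if (pPath->ucChannelMapping == 0)
+      return ucLane;                                       // default 1:1 mapping
+   return (pPath->ucChannelMapping >> (ucLane * 2)) & 0x3; // GPU TX pin index
+}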
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR  ucGuid [NUMBER_OF_UCHAR_FOR_GUID];       // a GUID is a 16-byte-long string
+  EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH];  // total of fixed 7 entries.
+  UCHAR  ucChecksum;           // a simple checksum: the sum of the whole structure equals 0x0.
+  UCHAR  uc3DStereoPinId;      // used for eDP panel
+  UCHAR  ucRemoteDisplayConfig;
+  UCHAR  uceDPToLVDSRxId;
+  UCHAR  ucFixDPVoltageSwing;  // when usCaps[1]=1, this indicates the DP_LANE_SET value
+  UCHAR  Reserved[3];          // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+//Related definitions; all records are different, but they have a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR ucRecordType;  //An enum to indicate the record type
+  UCHAR ucRecordSize;  //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE                           1
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE       5   //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6   //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8   //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE                10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                  11
+#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE        12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE   13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE        14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE          16  //This is for the case when connectors are not known to the object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17  //This is for the case when connectors are not known to the object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18  //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
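+
+// [Editor's note] Illustrative sketch only: records attached to an object
+// (via ATOM_OBJECT.usRecordOffset) form a packed list of variable-size
+// entries, each beginning with ATOM_COMMON_RECORD_HEADER, so they are walked
+// by ucRecordSize. A hypothetical helper:
+static inline ATOM_COMMON_RECORD_HEADER *
+atom_next_record(ATOM_COMMON_RECORD_HEADER *pRecord)
+{
+   return (ATOM_COMMON_RECORD_HEADER *)((UCHAR *)pRecord + pRecord->ucRecordSize);
+}
+// Callers typically loop while ucRecordType is non-zero and within the known
+// range, and ucRecordSize is non-zero, e.g. to find a connector's
+// ATOM_I2C_RECORD.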
+
+
+//Must be updated when a new record type is added; equal to that record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER  ATOM_ENCODER_CAP_RECORD_TYPE
+
+typedef struct _ATOM_I2C_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  ATOM_I2C_ID_CONFIG sucI2cId;
+  UCHAR ucI2CAddr;  //The slave address; it's 0 when the record is attached to a connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucHPDIntGPIOID;  //The corresponding block in the GPIO_PIN_INFO table gives the pin info
+  UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucProtectionFlag;
+  UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+  ULONG  ulACPIDeviceEnum;  //Reserved for now
+  USHORT usDeviceID;        //This ID is the same as "ATOM_DEVICE_XXX_SUPPORT"
+  USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucNumberOfDevice;
+  UCHAR ucReserved;
+  ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1];  //This ID is the same as "ATOM_DEVICE_XXX_SUPPORT"; 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucConfigGPIOID;
+  UCHAR ucConfigGPIOState;  //Set to 1 when it's active high to enable external flow in
+  UCHAR ucFlowinGPIPID;
+  UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucCTL1GPIO_ID;
+  UCHAR ucCTL1GPIOState;  //Set to 1 when it's active high
+  UCHAR ucCTL2GPIO_ID;
+  UCHAR ucCTL2GPIOState;  //Set to 1 when it's active high
+  UCHAR ucCTL3GPIO_ID;
+  UCHAR ucCTL3GPIOState;  //Set to 1 when it's active high
+  UCHAR ucCTLFPGA_IN_ID;
+  UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucGPIOID;         //The corresponding block in the GPIO_PIN_INFO table gives the pin info
+  UCHAR ucTVActiveState;  //Indicates the pin state (0 or 1) when a TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucTMSGPIO_ID;
+  UCHAR ucTMSGPIOState;  //Set to 1 when it's active high
+  UCHAR ucTCKGPIO_ID;
+  UCHAR ucTCKGPIOState;  //Set to 1 when it's active high
+  UCHAR ucTDOGPIO_ID;
+  UCHAR ucTDOGPIOState;  //Set to 1 when it's active high
+  UCHAR ucTDIGPIO_ID;
+  UCHAR ucTDIGPIOState;  //Set to 1 when it's active high
+  UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object GPIO pin control record type will gradually replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+  UCHAR ucGPIOID;        // GPIO_ID; find the corresponding ID in the GPIO_LUT table
+  UCHAR ucGPIO_PinState; // Pin state showing how to set up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucFlags;         // Future expandability
+  UCHAR ucNumberOfPins;  // Number of GPIO pins used to control the object
+  ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1];  // the real GPIO pin pairs; the count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
+#define GPIO_PIN_TYPE_INPUT       0x00
+#define GPIO_PIN_TYPE_OUTPUT      0x10
+#define GPIO_PIN_TYPE_HW_CONTROL  0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
+#define GPIO_PIN_OUTPUT_STATE_MASK   0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT  0
+#define GPIO_PIN_STATE_ACTIVE_LOW    0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH   0x1
+
+// Indexes to the GPIO array in the GLSync record
+// The GLSync record is for the Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK     0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC      1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC      2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ   3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT   4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT  5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET    6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL  7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL   8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX        9
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  ULONG ulStrengthControl;  // DVOA strength control for CF
+  UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2          0x01  // DP1.2 HBR2 is supported by the HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN       0x02  // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+#define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN  0x04  // HDMI2.0 6Gbps enabled or not.
+
+typedef struct _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  union {
+    USHORT usEncoderCap;
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT usReserved:14;  // Bits 2-15 may be defined for other capabilities in the future
+      USHORT usHBR2En:1;     // Bit1 is for DP1.2 HBR2 enable
+      USHORT usHBR2Cap:1;    // Bit0 is for DP1.2 HBR2 capability.
+#else
+      USHORT usHBR2Cap:1;    // Bit0 is for DP1.2 HBR2 capability.
+      USHORT usHBR2En:1;     // Bit1 is for DP1.2 HBR2 enable
+      USHORT usReserved:14;  // Bits 2-15 may be defined for other capabilities in the future
+#endif
+    };
+  };
+}ATOM_ENCODER_CAP_RECORD;
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA  1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB  2
+
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  USHORT usMaxPixClk;
+  UCHAR  ucFlowCntlGpioId;
+  UCHAR  ucSwapCntlGpioId;
+  UCHAR  ucConnectedDvoBundle;
+  UCHAR  ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;  //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+  UCHAR ucSubConnectorType;           //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+  UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucMuxType;        //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+  UCHAR ucMuxControlPin;
+  UCHAR ucMuxState[2];    //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucMuxType;
+  UCHAR ucMuxControlPin;
+  UCHAR ucMuxState[2];    //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK               0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT  0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD  //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD  //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //A fixed-size array which maps external pins to internal DDC IDs
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  USHORT usObjectID;  //could be a connector, encoder or other object defined in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
+{
+  USHORT usConnectorObjectId;
+  UCHAR  ucConnectorType;
+  UCHAR  ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D              1
+#define CONNECTOR_TYPE_DVI_I              2
+#define CONNECTOR_TYPE_VGA                3
+#define CONNECTOR_TYPE_HDMI               4
+#define CONNECTOR_TYPE_DISPLAY_PORT       5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT  6
+
+typedef struct _ATOM_BRACKET_LAYOUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR ucLength;
+  UCHAR ucWidth;
+  UCHAR ucConnNum;
+  UCHAR ucReserved;
+  ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
+
+/****************************************************************************/
+// Structure used in XXXX
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+  USHORT usVDDCBaseLevel;         //In units of 50mV
+  USHORT usReserved;              //For possible extension table offset
+  UCHAR  ucNumOfVoltageEntries;
+  UCHAR  ucBytesPerVoltageEntry;
+  UCHAR  ucVoltageStep;           //Indicates how many mV one step increments, in 0.5mV units
+  UCHAR  ucDefaultVoltageEntry;
+  UCHAR  ucVoltageControlI2cLine;
+  UCHAR  ucVoltageControlAddress;
+  UCHAR  ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_VOLTAGE_INFO_HEADER viHeader;
+  UCHAR ucVoltageEntries[64];  //64 is for allocation; the actual number of entries is ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+  USHORT usVoltageBaseLevel;      // In units of 1mV
+  USHORT usVoltageStep;           // Indicates how many mV one step increments, in 1mV units
+  UCHAR  ucNumOfVoltageEntries;   // Number of voltage entries, which indicates the max voltage
+  UCHAR  ucFlag;                  // bit0=0: step is 1mV, =1: 0.5mV
+  UCHAR  ucBaseVID;               // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+  UCHAR  ucReserved;
+  UCHAR  ucVIDAdjustEntries[32];  // 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+  USHORT usVoltageCode;   // The Voltage ID, either GPIO or I2C code
+  USHORT usVoltageValue;  // The corresponding voltage value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+  UCHAR ucNumOfVoltageEntries;  // Number of voltage entries, which indicates the max voltage
+  UCHAR ucReserved[3];
+  VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];  // 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
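+
+// [Editor's note] Illustrative sketch only: the linear VID encoding described
+// by ATOM_VOLTAGE_FORMULA above, assuming there is no lookup table and ucFlag
+// bit0 = 0 (1mV steps). All quantities in mV; the helper name is hypothetical.
+static inline UCHAR atom_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *pFormula,
+                                        USHORT usVoltageInMv)
+{
+   // VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+   return (UCHAR)(pFormula->ucBaseVID +
+                  (usVoltageInMv - pFormula->usVoltageBaseLevel) /
+                  pFormula->usVoltageStep);
+}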
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+  UCHAR  ucVoltageControlId;       //Indicates whether it is controlled by I2C, GPIO or a HW state machine
+  UCHAR  ucVoltageControlI2cLine;
+  UCHAR  ucVoltageControlAddress;
+  UCHAR  ucVoltageControlOffset;
+  USHORT usGpioPin_AIndex;         //GPIO_PAD register index
+  UCHAR  ucGpioPinBitShift[9];     //at most 8 pins support 255 VIDs; terminate with 0xff
+  UCHAR  ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define VOLTAGE_CONTROLLED_BY_HW        0x00
+#define VOLTAGE_CONTROLLED_BY_I2C_MASK  0x7F
+#define VOLTAGE_CONTROLLED_BY_GPIO      0x80
+#define VOLTAGE_CONTROL_ID_LM64         0x01  //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC          0x02  //I2C control, used for R5xx/R6xx MVDDC, MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM      0x03  //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402       0x04
+#define VOLTAGE_CONTROL_ID_UP6266       0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO      0x06
+#define VOLTAGE_CONTROL_ID_VT1556M      0x07
+#define VOLTAGE_CONTROL_ID_CHL822x      0x08
+#define VOLTAGE_CONTROL_ID_VT1586M      0x09
+#define VOLTAGE_CONTROL_ID_UP1637       0x0A
+#define VOLTAGE_CONTROL_ID_CHL8214      0x0B
+#define VOLTAGE_CONTROL_ID_UP1801       0x0C
+#define VOLTAGE_CONTROL_ID_ST6788A      0x0D
+#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
+#define VOLTAGE_CONTROL_ID_AD527x       0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022     0x10
+#define VOLTAGE_CONTROL_ID_LTC2635      0x11
+#define VOLTAGE_CONTROL_ID_NCP4208      0x12
+#define VOLTAGE_CONTROL_ID_IR35xx       0x13
+#define VOLTAGE_CONTROL_ID_RT9403       0x14
+
+#define VOLTAGE_CONTROL_ID_GENERIC_I2C  0x40
+
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+  UCHAR ucVoltageType;              //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+  UCHAR ucSize;                     //Size of the object
+  ATOM_VOLTAGE_CONTROL asControl;   //describes how to control
+  ATOM_VOLTAGE_FORMULA asFormula;   //indicates how to convert a real voltage to a VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+  UCHAR ucVoltageType;                //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+  UCHAR ucSize;                       //Size of the object
+  ATOM_VOLTAGE_CONTROL asControl;     //describes how to control
+  ATOM_VOLTAGE_FORMULA_V2 asFormula;  //indicates how to convert a real voltage to a VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_VOLTAGE_OBJECT asVoltageObj[3];  //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3];  //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+  UCHAR  ucLeakageId;
+  UCHAR  ucReserved;
+  USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+  UCHAR  ucVoltageType;  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+  UCHAR  ucVoltageMode;  //Indicates the voltage control mode: Init/Set/Leakage/Set phase
+  USHORT usSize;         //Size of the object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
+#define VOLTAGE_OBJ_GPIO_LUT              0     //VOLTAGE and GPIO lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ       3     //VOLTAGE REGULATOR INIT sequence through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_PHASE_LUT             4     //Set Vregulator phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_SVID2                 7     //Indicates voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV                   8
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT  0x10  //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11  //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT  0x12  //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+
+typedef struct _VOLTAGE_LUT_ENTRY_V2
+{
+  ULONG  ulVoltageId;     // The Voltage ID which is used to program the GPIO register
+  USHORT usVoltageValue;  // The corresponding voltage value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT usVoltageLevel;  // The voltage level, in mV
+  USHORT usVoltageId;     // The Voltage ID used to program the regulator
+  USHORT usLeakageId;     // The leakage ID this entry corresponds to
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+
+typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;  // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
+  UCHAR ucVoltageRegulatorId;             //Indicates the voltage regulator Id
+  UCHAR ucVoltageControlI2cLine;
+  UCHAR ucVoltageControlAddress;
+  UCHAR ucVoltageControlOffset;
+  UCHAR ucVoltageControlFlag;             // Bit0: 0 - one-byte data; 1 - two-byte data
+  UCHAR ulReserved[3];
+  VOLTAGE_LUT_ENTRY asVolI2cLut[1];       // end with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE  0
+#define VOLTAGE_DATA_TWO_BYTE  1
+
+typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;  // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
+  UCHAR ucVoltageGpioCntlId;              // default is 0, which indicates control through the CG VID mode
+  UCHAR ucGpioEntryNum;                   // indicates the number of entries in the Voltage/GPIO value lookup table
+  UCHAR ucPhaseDelay;                     // phase delay in units of microseconds
+  UCHAR ucReserved;
+  ULONG ulGpioMaskVal;                    // GPIO mask value
+  VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
+
+typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;  // voltage mode = 0x10/0x11/0x12
+  UCHAR ucLeakageCntlId;                  // default is 0
+  UCHAR ucLeakageEntryNum;                // indicates the number of entries in the LeakageId/Voltage LUT
+  UCHAR ucReserved[2];
+  ULONG ulMaxVoltageLevel;
+  LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
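+
+// [Editor's note] Illustrative sketch only: V3 voltage objects are
+// variable-size and self-describing, so a driver typically matches on
+// ucVoltageType/ucVoltageMode and advances by usSize (similar to what the
+// radeon driver does). A hypothetical lookup; a real implementation would
+// also use le16_to_cpu() and guard against usSize == 0:
+static inline ATOM_VOLTAGE_OBJECT_HEADER_V3 *
+atom_find_voltage_object_v3(ATOM_VOLTAGE_OBJECT_HEADER_V3 *pStart,
+                            USHORT usTotalSize, UCHAR ucType, UCHAR ucMode)
+{
+   USHORT usOffset = 0;
+   while (usOffset < usTotalSize) {
+      ATOM_VOLTAGE_OBJECT_HEADER_V3 *pObj =
+         (ATOM_VOLTAGE_OBJECT_HEADER_V3 *)((UCHAR *)pStart + usOffset);
+      if (pObj->ucVoltageType == ucType && pObj->ucVoltageMode == ucMode)
+         return pObj;
+      usOffset += pObj->usSize;
+   }
+   return (ATOM_VOLTAGE_OBJECT_HEADER_V3 *)0;
+}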
+
+typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
+{
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;  // voltage mode = VOLTAGE_OBJ_SVID2
+// 14:7 - PSI0_VID
+// 6   - PSI0_EN
+// 5   - PSI1
+// 4:2 - load line slope trim.
+// 1:0 - offset trim,
+  USHORT usLoadLine_PSI;
+// GPU GPIO pin Id to the SVID2 regulator VRHot pin. Possible values 0~31: 0 means GPIO0, 31 means GPIO31
+  UCHAR ucSVDGpioId;  //0~31 indicate GPIO0~31
+  UCHAR ucSVCGpioId;  //0~31 indicate GPIO0~31
+  ULONG ulReserved;
+}ATOM_SVID2_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+  ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3];  //Info for voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
+
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+  UCHAR  ucProfileId;
+  UCHAR  ucReserved;
+  USHORT usSize;
+  USHORT usEfuseSpareStartAddr;
+  USHORT usFuseIndex[8];             //from LSB to MSB, max 8 bits; ends with 0xffff if there are fewer than 8 efuse IDs
+  ATOM_LEAKID_VOLTAGE asLeakVol[2];  //Leakage ID and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE              1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE  1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE      2
+
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  UCHAR  ucLeakageBinNum;              // indicates the number of entries in the LeakageId/Voltage LUT
+  USHORT usLeakageBinArrayOffset;      // offset of the USHORT leakage bin list array (from lower LeakageId to higher)
+
+  UCHAR  ucElbVDDC_Num;
+  USHORT usElbVDDC_IdArrayOffset;      // offset of the USHORT virtual VDDC voltage id array ( 0xff01~0xff08 )
+  USHORT usElbVDDC_LevelArrayOffset;   // offset of the two-dimensional voltage level USHORT array
+
+  UCHAR  ucElbVDDCI_Num;
+  USHORT usElbVDDCI_IdArrayOffset;     // offset of the USHORT virtual VDDCI voltage id array ( 0xff01~0xff08 )
+  USHORT usElbVDDCI_LevelArrayOffset;  // offset of the two-dimensional voltage level USHORT array
+}ATOM_ASIC_PROFILING_INFO_V2_1;
+
+
+//Here are the parameters to convert an efuse value to a measured value
+//Measured = LN((2^Bitsize-1)/EFUSE-1)*(Range)/(-alpha)+(Max+Min)/2
+typedef struct _EFUSE_LOGISTIC_FUNC_PARAM
+{
+  USHORT usEfuseIndex;         // Efuse index in DWORD address, for example index 911, usEfuseIndex=112
+  UCHAR  ucEfuseBitLSB;        // Efuse bit LSB in DWORD address, for example index 911, ucEfuseBitLSB = 911-112*8 = 15
+  UCHAR  ucEfuseLength;        // Efuse bit length
+  ULONG  ulEfuseEncodeRange;   // Range = Max - Min; bit31 indicates the efuse is a negative number
+  ULONG  ulEfuseEncodeAverage; // Average = ( Max + Min )/2
+}EFUSE_LOGISTIC_FUNC_PARAM;
+
+//Linear function: Measured = Round( Efuse * ( Max-Min )/(2^BitSize -1 ) + Min )
+typedef struct _EFUSE_LINEAR_FUNC_PARAM
+{
+  USHORT usEfuseIndex;         // Efuse index in DWORD address, for example index 911, usEfuseIndex=112
+  UCHAR  ucEfuseBitLSB;        // Efuse bit LSB in DWORD address, for example index 911, ucEfuseBitLSB = 911-112*8 = 15
+  UCHAR  ucEfuseLength;        // Efuse bit length
+  ULONG  ulEfuseEncodeRange;   // Range = Max - Min; bit31 indicates the efuse is a negative number
+  ULONG  ulEfuseMin;           // Min
+}EFUSE_LINEAR_FUNC_PARAM;
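+
+// [Editor's note] Illustrative sketch only, shown as a comment because it is
+// user-space style code using <math.h> that does not belong in this header:
+// decoding the two fuse encodings above, assuming Range/Average/Min have
+// already been converted to plain doubles.
+//
+//   #include <math.h>
+//   /* Measured = LN((2^BitSize-1)/EFUSE - 1) * Range/(-alpha) + Average */
+//   double efuse_decode_logistic(unsigned fuse, unsigned bitsize,
+//                                double range, double average, double alpha)
+//   {
+//      double full = (double)((1u << bitsize) - 1u);
+//      return log(full / fuse - 1.0) * range / (-alpha) + average;
+//   }
+//
+//   /* Measured = Round( EFUSE * (Max-Min)/(2^BitSize-1) + Min ) */
+//   double efuse_decode_linear(unsigned fuse, unsigned bitsize,
+//                              double range, double min)
+//   {
+//      double full = (double)((1u << bitsize) - 1u);
+//      return round(fuse * range / full + min);
+//   }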
+
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  ULONG  ulEvvDerateTdp;
+  ULONG  ulEvvDerateTdc;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usCurrentDpm0;
+  USHORT usPowerDpm1;
+  USHORT usCurrentDpm1;
+  USHORT usPowerDpm2;
+  USHORT usCurrentDpm2;
+  USHORT usPowerDpm3;
+  USHORT usCurrentDpm3;
+  USHORT usPowerDpm4;
+  USHORT usCurrentDpm4;
+  USHORT usPowerDpm5;
+  USHORT usCurrentDpm5;
+  USHORT usPowerDpm6;
+  USHORT usCurrentDpm6;
+  USHORT usPowerDpm7;
+  USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_2
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  ULONG  ulEvvLkgFactor;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usPowerDpm1;
+  USHORT usPowerDpm2;
+  USHORT usPowerDpm3;
+  USHORT usPowerDpm4;
+  USHORT usPowerDpm5;
+  USHORT usPowerDpm6;
+  USHORT usPowerDpm7;
+  ULONG  ulTdpDerateDPM0;
+  ULONG  ulTdpDerateDPM1;
+  ULONG  ulTdpDerateDPM2;
+  ULONG  ulTdpDerateDPM3;
+  ULONG  ulTdpDerateDPM4;
+  ULONG  ulTdpDerateDPM5;
+  ULONG  ulTdpDerateDPM6;
+  ULONG  ulTdpDerateDPM7;
+}ATOM_ASIC_PROFILING_INFO_V3_2;
+
+
+// for the Tonga/Fiji speed EVV algorithm
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  ULONG  ulEvvLkgFactor;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usPowerDpm1;
+  USHORT usPowerDpm2;
+  USHORT usPowerDpm3;
+  USHORT usPowerDpm4;
+  USHORT usPowerDpm5;
+  USHORT usPowerDpm6;
+  USHORT usPowerDpm7;
+  ULONG  ulTdpDerateDPM0;
+  ULONG  ulTdpDerateDPM1;
+  ULONG  ulTdpDerateDPM2;
+  ULONG  ulTdpDerateDPM3;
+  ULONG  ulTdpDerateDPM4;
+  ULONG  ulTdpDerateDPM5;
+  ULONG  ulTdpDerateDPM6;
+  ULONG  ulTdpDerateDPM7;
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+  ULONG  ulRoAlpha;
+  ULONG  ulRoBeta;
+  ULONG  ulRoGamma;
+  ULONG  ulRoEpsilon;
+  ULONG  ulATermRo;
+  ULONG  ulBTermRo;
+  ULONG  ulCTermRo;
+  ULONG  ulSclkMargin;
+  ULONG  ulFmaxPercent;
+  ULONG  ulCRPercent;
+  ULONG  ulSFmaxPercent;
+  ULONG  ulSCRPercent;
+  ULONG  ulSDCMargine;
+}ATOM_ASIC_PROFILING_INFO_V3_3;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+  UCHAR ucPwrSrcId;           // Power source
+  UCHAR ucPwrSensorType;      // GPIO, I2C or none
+  UCHAR ucPwrSensId;          // if GPIO detect, it is the GPIO id; if I2C detect, it is the I2C id
+  UCHAR ucPwrSensSlaveAddr;   // Slave address if I2C detect
+  UCHAR ucPwrSensRegIndex;    // I2C register index if I2C detect
+  UCHAR ucPwrSensRegBitMask;  // detects which bit is used if I2C detect
+  UCHAR ucPwrSensActiveState; // high active or low active
+  UCHAR ucReserve[3];         // reserved
+  USHORT usSensPwr;           // in units of watts
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  UCHAR asPwrbehave[16];
+  ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1            0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1  0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1  0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2  0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2  0x08
+
+//define ucPwrSensorType
+#define POWER_SENSOR_ALWAYS  0x00
+#define POWER_SENSOR_GPIO    0x01
+#define POWER_SENSOR_I2C     0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG ulVoltageIndex;         // The voltage index indicated by FUSE; the same voltage index is shared with the SCLK DPM fuse table
+  ULONG ulMaximumSupportedCLK;  // Maximum clock supported with the specified voltage index, in units of 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY_V2
+{
+  USHORT usVoltageLevel;        // The real voltage level, rounded up, in mV
+  ULONG ulMaximumSupportedCLK;  // Maximum clock supported with the specified voltage index, in units of 10kHz
+}ATOM_CLK_VOLT_CAPABILITY_V2;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG ulSupportedSCLK;  // Maximum clock supported with the specified voltage index, in units of 10kHz
+  USHORT usVoltageIndex;  // The voltage index indicated by FUSE for the specified SCLK
+  USHORT usVoltageID;     // The voltage ID indicated by FUSE for the specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE  1  // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];
+  ULONG  ulCSR_M3_ARB_CNTL_UVD[10];
+  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
+  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG;
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15];
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE  0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION     0x08
+
+//ucLVDSMisc:
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE  0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                   0x02
+#define SYS_INFO_LVDSMISC__888_BPC                      0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                  0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW              0x10
+// new since Trinity
+#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN  0x20
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW             0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW             0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:      VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:         Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:         System memory boot-up clock frequency, in 10kHz units.
+sDISPCLK_Voltage:         Reports the display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot-up display IDs; the following are the supported devices in Llano/Ontario projects:
+                          ATOM_DEVICE_CRT1_SUPPORT  0x0001
+                          ATOM_DEVICE_CRT2_SUPPORT  0x0010
+                          ATOM_DEVICE_DFP1_SUPPORT  0x0008
+                          ATOM_DEVICE_DFP6_SUPPORT  0x0040
+                          ATOM_DEVICE_DFP2_SUPPORT  0x0080
+                          ATOM_DEVICE_DFP3_SUPPORT  0x0200
+                          ATOM_DEVICE_DFP4_SUPPORT  0x0400
+                          ATOM_DEVICE_DFP5_SUPPORT  0x0800
+                          ATOM_DEVICE_LCD1_SUPPORT  0x0002
+ulOtherDisplayMisc:       Other display related flags, not defined yet.
+ulGPUCapInfo:             bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                          bit[3]=0: Enable HW AUX mode detection logic
+                                =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:      Physical base address of the SB MMIO space. The driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:   When it's set to 0x0 by SBIOS: the LCD backlight is not controlled by the GPU (SW).
+                          Any attempt to change BL using a VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                          When it's set to a non-zero frequency, the backlight is controlled by the GPU (SW) in one of the two ways below:
+                          1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
+                          VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                          changing BL using a VBIOS function is functional in both driver and non-driver present environments,
+                          and enabling VariBri under the driver environment from the PP table is optional.
+
+                          2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                          that BL control from the GPU is expected.
+                          VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                          Changing BL using a VBIOS function could be functional in both driver and non-driver present environments, but
+                          it's per-platform,
+                          and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:              Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                          Threshold-on value to enter the HTC_active state.
+ucHtcHystLmt:             Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                          Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:         Minimum SCLK allowed, in 10kHz units. This is calculated based on the WRCK fuse settings.
+ulSystemConfig:           Bit[0]=0: PCIE Power Gating Disabled
+                                =1: PCIE Power Gating Enabled
+                          Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                 1: DDR-DLL shut-down feature enabled.
+                          Bit[2]=0: DDR-PLL Power down feature disabled.
+                                 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo:             TBD
+usNBP0Voltage:            VID for the voltage on NB P0 State
+usNBP1Voltage:            VID for the voltage on NB P1 State
+usBootUpNBVoltage:        Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:  Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:  Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                          to indicate a range.
+                          SUPPORTED_LCD_REFRESHRATE_30Hz  0x0004
+                          SUPPORTED_LCD_REFRESHRATE_40Hz  0x0008
+                          SUPPORTED_LCD_REFRESHRATE_50Hz  0x0010
+                          SUPPORTED_LCD_REFRESHRATE_60Hz  0x0020
+ucMemoryType:             [3:0]=1:DDR1;=2:DDR2;=3:DDR3. [7:4] is reserved.
+ucUMAChannelNumber:       Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Array with values for the CSR M3 arbiter for the default case
+ulCSR_M3_ARB_CNTL_UVD[10]: Array with values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Array with values for the CSR M3 arbiter for full-screen 3D applications.
+sAvail_SCLK[5]:           Array providing the available list of SCLKs and corresponding voltages, ordered from low to high
+ulGMCRestoreResetTime:    GMC power restore and GMC reset time, used to calculate the data reconnection latency. In ns units.
+ulMinimumNClk:            Minimum NCLK speed among all NB P-states, used to calculate the data reconnection latency. In 10kHz units.
+ulIdleNClk:               NCLK speed while memory runs in the self-refresh state. In 10kHz units.
+ulDDR_DLL_PowerUpTime:    DDR PHY DLL power-up time. In ns units.
+ulDDR_PLL_PowerUpTime:    DDR PHY PLL power-up time. In ns units.
+usPCIEClkSSPercentage:    PCIE clock spread spectrum percentage in 0.01% units; 100 means 1%.
+usPCIEClkSSType:          PCIE clock spread spectrum type. 0 for down spread (default); 1 for center spread.
+usLvdsSSPercentage:       LVDS panel (not including eDP) spread spectrum percentage in 0.01% units; =0, use the VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:  LVDS panel (not including eDP) spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usHDMISSPercentage:       HDMI spread spectrum percentage in 0.01% units; 100 means 1%; =0, use the VBIOS default setting.
+usHDMISSpreadRateIn10Hz:  HDMI spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usDVISSPercentage:        DVI spread spectrum percentage in 0.01% units; 100 means 1%; =0, use the VBIOS default setting.
+usDVISSpreadRateIn10Hz:   DVI spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink: Max pixel clock of an LVDS panel in single link; if =0 it means VBIOS uses the default threshold, which is currently 85MHz
+ucLVDSMisc:               [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                          [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link are not swapped, =1: lower link and upper link are swapped
+                          [bit2] LVDS 888bit per color mode =0: 666 bits per color =1: 888 bits per color
+                          [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                          [bit4] Polarity of the signal sent to the digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
+**********************************************************************************************************************/
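+
+// [Editor's note] Illustrative sketch only, derived from the V6 description
+// above: the HTC exit ("off") threshold is the entry ("on") threshold minus
+// the hysteresis value, and spread-spectrum percentages are stored in 0.01%
+// units (e.g. usPCIEClkSSPercentage == 250 means 2.50%). The helper name is
+// hypothetical.
+static inline UCHAR atom_htc_tmp_off_lmt(UCHAR ucHtcTmpLmt, UCHAR ucHtcHystLmt)
+{
+   return ucHtcTmpLmt - ucHtcHystLmt;  // threshold to exit HTC_active
+}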
+
+// this table is used for Llano/Ontario APUs
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
+  ULONG ulPowerplayTable[128];
+}ATOM_FUSION_SYSTEM_INFO_V1;
+
+
+typedef struct _ATOM_TDP_CONFIG_BITS
+{
+#if ATOM_BIG_ENDIAN
+  ULONG uReserved:2;
+  ULONG uTDP_Value:14;   // Original TDP value in tens of milliwatts
+  ULONG uCTDP_Value:14;  // Override value in tens of milliwatts
+  ULONG uCTDP_Enable:2;  // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
+#else
+  ULONG uCTDP_Enable:2;  // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
+  ULONG uCTDP_Value:14;  // Override value in tens of milliwatts
+  ULONG uTDP_Value:14;   // Original TDP value in tens of milliwatts
+  ULONG uReserved:2;
+#endif
+}ATOM_TDP_CONFIG_BITS;
+
+typedef union _ATOM_TDP_CONFIG
+{
+  ATOM_TDP_CONFIG_BITS TDP_config;
+  ULONG TDP_config_all;
+}ATOM_TDP_CONFIG;
+
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:    refer to the ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]: These 512 bytes of memory are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
+**********************************************************************************************************************/
+
+// this IntegratedSystemInfoTable is used for Trinity APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG;
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE  0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                 0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT           0x08
+#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS                 0x10
+//ulGPUCapInfo[16]=1 indicates the SMC firmware is able to support the GNB fast resume function, so the driver can call the SMC to program most GNB registers during resume, from ML
+#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE           0x00010000
+
+//ulGPUCapInfo[17]=1 indicates the battery boost feature is enabled, from ML
+#define SYS_INFO_GPUCAPS__BATTERY_BOOST_ENABLE              0x00020000
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:      VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:         Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:         System memory boot-up clock frequency, in 10kHz units.
+sDISPCLK_Voltage:         Reports the display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot-up display IDs; the following are the supported devices in Trinity projects:
+                          ATOM_DEVICE_CRT1_SUPPORT  0x0001
+                          ATOM_DEVICE_DFP1_SUPPORT  0x0008
+                          ATOM_DEVICE_DFP6_SUPPORT  0x0040
+                          ATOM_DEVICE_DFP2_SUPPORT  0x0080
+                          ATOM_DEVICE_DFP3_SUPPORT  0x0200
+                          ATOM_DEVICE_DFP4_SUPPORT  0x0400
+                          ATOM_DEVICE_DFP5_SUPPORT  0x0800
+                          ATOM_DEVICE_LCD1_SUPPORT  0x0002
+ulOtherDisplayMisc:       bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+                                =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                          bit[1]=0: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is not supported by SBIOS
+                                =1: INT15 callback function Get boot display ( ax=4e08, bl=01h ) is supported by SBIOS
+                          bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is not supported by SBIOS
+                                =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h ) is supported by SBIOS
+                          bit[3]=0: VBIOS fast boot is disabled
+                                =1: VBIOS fast boot is enabled. (VBIOS skips display device detection in every set mode if an LCD panel is connected and the lid is open)
+ulGPUCapInfo:             bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                          bit[1]=0: DP mode uses cascade PLL mode ( new for Trinity )
+                                =1: DP mode uses single PLL mode
+                          bit[3]=0: Enable AUX HW mode detection logic
+                                =1: Disable AUX HW mode detection logic
+
+ulSB_MMIO_Base_Addr:      Physical base address of the SB MMIO space. The driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:   When it's set to 0x0 by SBIOS: the LCD backlight is not controlled by the GPU (SW).
+                          Any attempt to change BL using a VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                          When it's set to a non-zero frequency, the backlight is controlled by the GPU (SW) in one of the two ways below:
+                          1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
+                          VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                          changing BL using a VBIOS function is functional in both driver and non-driver present environments,
+                          and enabling VariBri under the driver environment from the PP table is optional.
+
+                          2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                          that BL control from the GPU is expected.
+                          VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                          Changing BL using a VBIOS function could be functional in both driver and non-driver present environments, but
+                          it's per-platform,
+                          and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:              Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                          Threshold-on value to enter the HTC_active state.
+ucHtcHystLmt:             Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                          Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:         Minimum SCLK allowed, in 10kHz units. This is calculated based on the WRCK fuse settings.
+ulSystemConfig:           Bit[0]=0: PCIE Power Gating Disabled
+                                =1: PCIE Power Gating Enabled
+                          Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                 1: DDR-DLL shut-down feature enabled.
+                          Bit[2]=0: DDR-PLL Power down feature disabled.
+                                 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo:             TBD
+usNBP0Voltage:            VID for the voltage on NB P0 State
+usNBP1Voltage:            VID for the voltage on NB P1 State
+usNBP2Voltage:            VID for the voltage on NB P2 State
+usNBP3Voltage:            VID for the voltage on NB P3 State
+usBootUpNBVoltage:        Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:  Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:  Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                          to indicate a range.
+                          SUPPORTED_LCD_REFRESHRATE_30Hz  0x0004
+                          SUPPORTED_LCD_REFRESHRATE_40Hz  0x0008
+                          SUPPORTED_LCD_REFRESHRATE_50Hz  0x0010
+                          SUPPORTED_LCD_REFRESHRATE_60Hz  0x0020
+ucMemoryType:             [3:0]=1:DDR1;=2:DDR2;=3:DDR3. [7:4] is reserved.
+ucUMAChannelNumber:       Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Array with values for the CSR M3 arbiter for the default case
+ulCSR_M3_ARB_CNTL_UVD[10]: Array with values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Array with values for the CSR M3 arbiter for full-screen 3D applications.
+sAvail_SCLK[5]:           Array providing the available list of SCLKs and corresponding voltages, ordered from low to high
+ulGMCRestoreResetTime:    GMC power restore and GMC reset time, used to calculate the data reconnection latency. In ns units.
+ulMinimumNClk:            Minimum NCLK speed among all NB P-states, used to calculate the data reconnection latency. In 10kHz units.
+ulIdleNClk:               NCLK speed while memory runs in the self-refresh state. In 10kHz units.
+ulDDR_DLL_PowerUpTime:    DDR PHY DLL power-up time. In ns units.
+ulDDR_PLL_PowerUpTime:    DDR PHY PLL power-up time. In ns units.
+usPCIEClkSSPercentage:    PCIE clock spread spectrum percentage in 0.01% units; 100 means 1%.
+usPCIEClkSSType:          PCIE clock spread spectrum type. 0 for down spread (default); 1 for center spread.
+usLvdsSSPercentage:       LVDS panel (not including eDP) spread spectrum percentage in 0.01% units; =0, use the VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:  LVDS panel (not including eDP) spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usHDMISSPercentage:       HDMI spread spectrum percentage in 0.01% units; 100 means 1%; =0, use the VBIOS default setting.
+usHDMISSpreadRateIn10Hz:  HDMI spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usDVISSPercentage:        DVI spread spectrum percentage in 0.01% units; 100 means 1%; =0, use the VBIOS default setting.
+usDVISSpreadRateIn10Hz:   DVI spread spectrum frequency in 10Hz units; =0, use the VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink: Max pixel clock of an LVDS panel in single link; if =0 it means VBIOS uses the default threshold, which is currently 85MHz
+ucLVDSMisc:               [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                          [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link are not swapped, =1: lower link and upper link are swapped
+                          [bit2] LVDS 888bit per color mode =0: 666 bits per color =1: 888 bits per color
+                          [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+                          [bit4] Polarity of the signal sent to the digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
+                          [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
+ucTravisLVDSVolAdjust:    When ucLVDSMisc[5]=1, it means the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
+                          value to program the Travis register LVDS_CTRL_4.
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
+                          LVDS power-up sequence time in units of 4ms; time delay from the DIGON signal active to the data enable signal ( DE ) active.
+                          =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power-up sequence is: DIGON->DE->VARY_BL->BLON.
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms:
+                          LVDS power-up sequence time in units of 4ms; time delay from DE ( data enable ) active to the vary brightness enable signal ( VARY_BL ) active.
+                          =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power-up sequence is: DIGON->DE->VARY_BL->BLON.
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms:
+                          LVDS power-down sequence time in units of 4ms; time delay from the vary brightness enable signal ( VARY_BL ) off to data enable ( DE ) off.
+                          =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power-down sequence is: BLON->VARY_BL->DE->DIGON.
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucLVDSPwrOffSeqDEtoDIGON_in4Ms:
+                          LVDS power-down sequence time in units of 4ms; time delay from data enable ( DE ) off to LCDVCC ( DIGON ) off.
+                          =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power-down sequence is: BLON->VARY_BL->DE->DIGON.
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms: LVDS power sequence delay in units of 4ms. Time delay from the DIGON signal off to the DIGON signal active.
+                          =0 means use the VBIOS default delay, which is 125 ( 500ms ).
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
+                          LVDS power-up sequence time in units of 4ms. Time delay from the VARY_BL signal on to the BLON signal active.
+                          =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
+                          LVDS power-down sequence time in units of 4ms. Time delay from the BLON signal off to the VARY_BL signal off.
+                          =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+                          This parameter is used by VBIOS only. VBIOS will patch the LVDS_InfoTable.
+
+ucMinAllowedBL_Level:     Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
+
+ulNbpStateMemclkFreq[4]:  System memory clock frequency, in units of 10kHz, in the different NB P-states.
+
+**********************************************************************************************************************/
+
+// this IntegratedSystemInfoTable is used for Kaveri & Kabini APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulVBIOSMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulDISP_CLK2Freq;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulReserved2;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  ULONG  ulReserved3;
+  USHORT usGPUReservedSysMemSize;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulReserved4;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulGPUReservedSysMemBaseAddrLo;
+  ULONG  ulGPUReservedSysMemBaseAddrHi;
+  ATOM_CLK_VOLT_CAPABILITY s5thDISPCLK_Voltage;
+  ULONG  ulReserved5;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  ULONG  ulPSPVersion;
+  ULONG  ulNbpStateNClkFreq[4];
+  USHORT usNBPStateVoltage[4];
+  USHORT usBootUpNBVoltage;
+  USHORT usReserved2;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
+ulBootUpEngineClock:      VBIOS bootup engine clock frequency, in 10kHz units. If it is 0, VBIOS uses the pre-defined bootup engine clock.
+ulDentistVCOFreq:         Dentist VCO clock, in 10kHz units.
+ulBootUpUMAClock:         System memory boot-up clock frequency, in 10kHz units.
+sDISPCLK_Voltage:         Reports the display clock frequency requirement per GNB voltage (up to 4 voltage levels).
+ +ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following devices are supported in Trinity projects: + ATOM_DEVICE_CRT1_SUPPORT 0x0001 + ATOM_DEVICE_DFP1_SUPPORT 0x0008 + ATOM_DEVICE_DFP6_SUPPORT 0x0040 + ATOM_DEVICE_DFP2_SUPPORT 0x0080 + ATOM_DEVICE_DFP3_SUPPORT 0x0200 + ATOM_DEVICE_DFP4_SUPPORT 0x0400 + ATOM_DEVICE_DFP5_SUPPORT 0x0800 + ATOM_DEVICE_LCD1_SUPPORT 0x0002 + +ulVBIOSMisc: Miscellaneous flags for VBIOS requirements and interface + bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS. + =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS. + bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS + =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS + bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS + =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS + bit[3]=0: VBIOS fast boot is disabled + =1: VBIOS fast boot is enabled. (VBIOS skips display device detection in every set mode if the LCD panel is connected and the LID is open) + +ulGPUCapInfo: bit[0~2]= Reserved + bit[3]=0: Enable AUX HW mode detection logic + =1: Disable AUX HW mode detection logic + bit[4]=0: Disable DFS bypass feature + =1: Enable DFS bypass feature + +usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU (SW). + Any attempt to change BL using the VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0; + + When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below: + 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq GPU should use. + VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result, + changing BL using the VBIOS function is functional in both driver and non-driver present environments, + and enabling VariBri under the driver environment from the PP table is optional. + + 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves as a flag only, indicating + that BL control from GPU is expected. + VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1. + Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but + it is platform dependent, + and enabling VariBri under the driver environment from the PP table is optional. + +ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold-on value to enter HTC_active state. +ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt. + Used to calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt. + +ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled + =1: PCIE Power Gating Enabled + Bit[1]=0: DDR-DLL shut-down feature disabled. + =1: DDR-DLL shut-down feature enabled. + Bit[2]=0: DDR-PLL Power down feature disabled. + =1: DDR-PLL Power down feature enabled. + Bit[3]=0: GNB DPM is disabled + =1: GNB DPM is enabled +ulCPUCapInfo: TBD + +usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure +usPanelRefreshRateRange: Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set + to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 + SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 + SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 + SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 + +ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved. +ucUMAChannelNumber: Number of system memory channels. + +strVBIOSMsg[40]: VBIOS boot up customized message string + +sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high + +ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. +ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz. +ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns. +ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns. + +usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit of 0.01%; 100 means 1%. +usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread. +usLvdsSSPercentage: LVDS panel (not including eDP) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting. +usLvdsSSpreadRateIn10Hz: LVDS panel (not including eDP) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. +usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting. +usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. +usDVISSPercentage: DVI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting. +usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. + +usGPUReservedSysMemSize: Reserved system memory size for the ACP engine in the APU GNB, in units of MB. 0/2/4MB based on CMOS options; the current default could be 0MB. KV only, not on KB. +ulGPUReservedSysMemBaseAddrLo: Low 32 bits of the base address of the reserved system memory. +ulGPUReservedSysMemBaseAddrHi: High 32 bits of the base address of the reserved system memory. + +usMaxLVDSPclkFreqInSingleLink: Max pixel clock of a single-link LVDS panel; if =0, VBIOS uses the default threshold, which is currently 85MHz +ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode + [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped + [bit2] LVDS 888bit per color mode =0: 666 bits per color =1: 888 bits per color + [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used + [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low) + [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4 +ucTravisLVDSVolAdjust: When ucLVDSMisc[5]=1, the platform SBIOS wants to override TravisLVDSVoltage; VBIOS will then use the ucTravisLVDSVolAdjust + value to program the Travis register LVDS_CTRL_4 +ucLVDSPwrOnSeqDIGONtoDE_in4Ms: + LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active (DE). + =0 means use the VBIOS default, which is 8 (32ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON. + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
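ucLVDSMisc, described above, packs six independent flags into one byte. A hedged sketch of how a driver could decode it follows; the mask names are invented for this example and are not defines from this header:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative masks matching the ucLVDSMisc bit layout documented above. */
#define LVDS_MISC_FPDI_MODE            (1 << 0)  /* 0: LDI, 1: FPDI (888-bit panel) */
#define LVDS_MISC_LINK_SWAP            (1 << 1)  /* lower/upper link swapped */
#define LVDS_MISC_888_PER_COLOR        (1 << 2)  /* 0: 666, 1: 888 bits per color */
#define LVDS_MISC_OVERRIDE_EN          (1 << 3)  /* ucLvdsMisc parameters are valid */
#define LVDS_MISC_BLON_ACTIVE_LOW      (1 << 4)  /* BLON output pin inverted */
#define LVDS_MISC_TRAVIS_VOLT_OVERRIDE (1 << 5)  /* use ucTravisLVDSVolAdjust */

/* Per bit3 above, the other ucLvdsMisc flags only apply when the
 * override-enable bit is set. */
static bool lvds_overrides_enabled(uint8_t lvds_misc)
{
        return (lvds_misc & LVDS_MISC_OVERRIDE_EN) != 0;
}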
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms: + LVDS power up sequence time in unit of 4ms, time delay from DE (data enable) active to Vary Brightness enable signal active (VARY_BL). + =0 means use the VBIOS default, which is 90 (360ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON. + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. +ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: + LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal (VARY_BL) off to data enable (DE) signal off. + =0 means use the VBIOS default delay, which is 8 (32ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. +ucLVDSPwrOffSeqDEtoDIGON_in4Ms: + LVDS power down sequence time in unit of 4ms, time delay from data enable (DE) signal off to LCDVCC (DIGON) off. + =0 means use the VBIOS default, which is 90 (360ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. +ucLVDSOffToOnDelay_in4Ms: + LVDS off-to-on delay in unit of 4ms. Time delay from DIGON signal off to DIGON signal active. + =0 means to use the VBIOS default delay, which is 125 (500ms). + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. +ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms: + LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active. + =0 means to use the VBIOS default delay, which is 0 (0ms). + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. + +ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms: + LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. + =0 means to use the VBIOS default delay, which is 0 (0ms). + This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable. +ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0. + +ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL + +ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10Khz in different NB P-States (P0, P1, P2 & P3).
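All of the *_in4Ms fields above follow the same convention: the raw byte counts 4 ms ticks, and 0 selects a per-field VBIOS default. A small sketch under that assumption (the function and parameter names are illustrative only, not part of this header):

#include <stdint.h>

/* Convert a *_in4Ms power-sequence field to milliseconds; 0 means
 * "use the VBIOS default tick count" (e.g. 8 for DIGON->DE, 125 for
 * the off-to-on delay). */
static uint32_t lvds_seq_delay_ms(uint8_t raw_in_4ms, uint8_t default_in_4ms)
{
        uint8_t ticks = raw_in_4ms ? raw_in_4ms : default_in_4ms;
        return (uint32_t)ticks * 4;  /* lvds_seq_delay_ms(0, 8) == 32 */
}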
+ulNbpStateNClkFreq[4]: NB P-State NClk frequency in different NB P-States +usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage +usBootUpNBVoltage: NB P-State voltage during boot up, before the driver is loaded +sExtDispConnInfo: Display connector information table provided to VBIOS + +**********************************************************************************************************************/ + +// this table is used for Kaveri/Kabini APUs +typedef struct _ATOM_FUSION_SYSTEM_INFO_V2 +{ + ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition + ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure +}ATOM_FUSION_SYSTEM_INFO_V2; + + +typedef struct _ATOM_I2C_REG_INFO +{ + UCHAR ucI2cRegIndex; + UCHAR ucI2cRegVal; +}ATOM_I2C_REG_INFO; + +// this IntegratedSystemInfoTable is used for Carrizo +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; + ULONG ulDentistVCOFreq; + ULONG ulBootUpUMAClock; + ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; // no longer used, keep it as is to avoid driver compile errors + ULONG ulBootUpReqDisplayVector; + ULONG ulVBIOSMisc; + ULONG ulGPUCapInfo; + ULONG ulDISP_CLK2Freq; + USHORT usRequestedPWMFreqInHz; + UCHAR ucHtcTmpLmt; + UCHAR ucHtcHystLmt; + ULONG ulReserved2; + ULONG ulSystemConfig; + ULONG ulCPUCapInfo; + ULONG ulReserved3; + USHORT usGPUReservedSysMemSize; + USHORT usExtDispConnInfoOffset; + USHORT usPanelRefreshRateRange; + UCHAR ucMemoryType; + UCHAR ucUMAChannelNumber; + UCHAR strVBIOSMsg[40]; + ATOM_TDP_CONFIG asTdpConfig; + UCHAR ucExtHDMIReDrvSlvAddr; + UCHAR ucExtHDMIReDrvRegNum; + ATOM_I2C_REG_INFO asExtHDMIRegSetting[9]; + ULONG ulReserved[2]; + ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8]; + ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; // no longer used, keep it as is to avoid driver compile errors + ULONG ulGMCRestoreResetTime; + ULONG ulReserved4; + ULONG ulIdleNClk; + ULONG ulDDR_DLL_PowerUpTime; + ULONG ulDDR_PLL_PowerUpTime; + USHORT usPCIEClkSSPercentage; + USHORT usPCIEClkSSType; + USHORT usLvdsSSPercentage; + USHORT usLvdsSSpreadRateIn10Hz; + USHORT usHDMISSPercentage; + USHORT usHDMISSpreadRateIn10Hz; + USHORT usDVISSPercentage; + USHORT usDVISSpreadRateIn10Hz; + ULONG ulGPUReservedSysMemBaseAddrLo; + ULONG ulGPUReservedSysMemBaseAddrHi; + ULONG ulReserved5[3]; + USHORT usMaxLVDSPclkFreqInSingleLink; + UCHAR ucLvdsMisc; + UCHAR ucTravisLVDSVolAdjust; + UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms; + UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms; + UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms; + UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms; + UCHAR ucLVDSOffToOnDelay_in4Ms; + UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms; + UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms; + UCHAR ucMinAllowedBL_Level; + ULONG ulLCDBitDepthControlVal; + ULONG ulNbpStateMemclkFreq[4]; // only 2 levels are changed.
+ ULONG ulPSPVersion; + ULONG ulNbpStateNClkFreq[4]; + USHORT usNBPStateVoltage[4]; + USHORT usBootUpNBVoltage; + UCHAR ucEDPv1_4VSMode; + UCHAR ucReserved2; + ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; +}ATOM_INTEGRATED_SYSTEM_INFO_V1_9; + + +// definition for ucEDPv1_4VSMode +#define EDP_VS_LEGACY_MODE 0 +#define EDP_VS_LOW_VDIFF_MODE 1 +#define EDP_VS_HIGH_VDIFF_MODE 2 +#define EDP_VS_STRETCH_MODE 3 +#define EDP_VS_SINGLE_VDIFF_MODE 4 +#define EDP_VS_VARIABLE_PREM_MODE 5 + + +// this IntegrateSystemInfoTable is used for Carrizo +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; + ULONG ulDentistVCOFreq; + ULONG ulBootUpUMAClock; + ULONG ulReserved0[8]; + ULONG ulBootUpReqDisplayVector; + ULONG ulVBIOSMisc; + ULONG ulGPUCapInfo; + ULONG ulReserved1; + USHORT usRequestedPWMFreqInHz; + UCHAR ucHtcTmpLmt; + UCHAR ucHtcHystLmt; + ULONG ulReserved2; + ULONG ulSystemConfig; + ULONG ulCPUCapInfo; + ULONG ulReserved3; + USHORT usGPUReservedSysMemSize; + USHORT usExtDispConnInfoOffset; + USHORT usPanelRefreshRateRange; + UCHAR ucMemoryType; + UCHAR ucUMAChannelNumber; + UCHAR strVBIOSMsg[40]; + ATOM_TDP_CONFIG asTdpConfig; + ULONG ulReserved[7]; + ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8]; + ULONG ulReserved6[10]; + ULONG ulGMCRestoreResetTime; + ULONG ulReserved4; + ULONG ulIdleNClk; + ULONG ulDDR_DLL_PowerUpTime; + ULONG ulDDR_PLL_PowerUpTime; + USHORT usPCIEClkSSPercentage; + USHORT usPCIEClkSSType; + USHORT usLvdsSSPercentage; + USHORT usLvdsSSpreadRateIn10Hz; + USHORT usHDMISSPercentage; + USHORT usHDMISSpreadRateIn10Hz; + USHORT usDVISSPercentage; + USHORT usDVISSpreadRateIn10Hz; + ULONG ulGPUReservedSysMemBaseAddrLo; + ULONG ulGPUReservedSysMemBaseAddrHi; + ULONG ulReserved5[3]; + USHORT usMaxLVDSPclkFreqInSingleLink; + UCHAR ucLvdsMisc; + UCHAR ucTravisLVDSVolAdjust; + UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms; + UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms; + UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms; + UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms; + UCHAR ucLVDSOffToOnDelay_in4Ms; + UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms; + UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms; + UCHAR ucMinAllowedBL_Level; + ULONG ulLCDBitDepthControlVal; + ULONG ulNbpStateMemclkFreq[2]; + ULONG ulReserved7[2]; + ULONG ulPSPVersion; + ULONG ulNbpStateNClkFreq[4]; + USHORT usNBPStateVoltage[4]; + USHORT usBootUpNBVoltage; + UCHAR ucEDPv1_4VSMode; + UCHAR ucReserved2; + ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; +}ATOM_INTEGRATED_SYSTEM_INFO_V1_10; + +/**************************************************************************/ +// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design +//Memory SS Info Table +//Define Memory Clock SS chip ID +#define ICS91719 1 +#define ICS91720 2 + +//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol +typedef struct _ATOM_I2C_DATA_RECORD +{ + UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" + UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually +}ATOM_I2C_DATA_RECORD; + + +//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information +typedef struct _ATOM_I2C_DEVICE_SETUP_INFO +{ + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap. 
+ UCHAR ucSSChipID; //SS chip being used + UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip + UCHAR ucNumOfI2CDataRecords; //number of data blocks + ATOM_I2C_DATA_RECORD asI2CData[1]; +}ATOM_I2C_DEVICE_SETUP_INFO; + +//========================================================================================== +typedef struct _ATOM_ASIC_MVDD_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; +}ATOM_ASIC_MVDD_INFO; + +//========================================================================================== +#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO + +//========================================================================================== +/**************************************************************************/ + +typedef struct _ATOM_ASIC_SS_ASSIGNMENT +{ + ULONG ulTargetClockRange; //Clock Out frequency (VCO), in unit of 10Khz + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% + USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread. + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT; + +//Define ucClockIndication, SW uses the IDs below to search whether SS is required/enabled on a clock branch/signal type. +//SS is not required or enabled if a match is not found. +#define ASIC_INTERNAL_MEMORY_SS 1 +#define ASIC_INTERNAL_ENGINE_SS 2 +#define ASIC_INTERNAL_UVD_SS 3 +#define ASIC_INTERNAL_SS_ON_TMDS 4 +#define ASIC_INTERNAL_SS_ON_HDMI 5 +#define ASIC_INTERNAL_SS_ON_LVDS 6 +#define ASIC_INTERNAL_SS_ON_DP 7 +#define ASIC_INTERNAL_SS_ON_DCPLL 8 +#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9 +#define ASIC_INTERNAL_VCE_SS 10 +#define ASIC_INTERNAL_GPUPLL_SS 11 + + +typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 +{ + ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO), in unit of 10Khz + //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock (27000 or 16200) + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% + USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT_V2; + +//ucSpreadSpectrumMode +//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 +//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 +//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001 +//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001 +//#define ATOM_INTERNAL_SS_MASK 0x00000000 +//#define ATOM_EXTERNAL_SS_MASK 0x00000002 + +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; +}ATOM_ASIC_INTERNAL_SS_INFO; + +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2; + +typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3 +{ + ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz + //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4 + USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT_V3; + +//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode +#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01 +#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02 +#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 + +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only. +}ATOM_ASIC_INTERNAL_SS_INFO_V3; + + +//==============================Scratch Pad Definition Portion=============================== +#define ATOM_DEVICE_CONNECT_INFO_DEF 0 +#define ATOM_ROM_LOCATION_DEF 1 +#define ATOM_TV_STANDARD_DEF 2 +#define ATOM_ACTIVE_INFO_DEF 3 +#define ATOM_LCD_INFO_DEF 4 +#define ATOM_DOS_REQ_INFO_DEF 5 +#define ATOM_ACC_CHANGE_INFO_DEF 6 +#define ATOM_DOS_MODE_INFO_DEF 7 +#define ATOM_I2C_CHANNEL_STATUS_DEF 8 +#define ATOM_I2C_CHANNEL_STATUS1_DEF 9 +#define ATOM_INTERNAL_TIMER_DEF 10 + +// BIOS_0_SCRATCH Definition +#define ATOM_S0_CRT1_MONO 0x00000001L +#define ATOM_S0_CRT1_COLOR 0x00000002L +#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR) + +#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L +#define ATOM_S0_TV1_SVIDEO_A 0x00000008L +#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A) + +#define ATOM_S0_CV_A 0x00000010L +#define ATOM_S0_CV_DIN_A 0x00000020L +#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A) + + +#define ATOM_S0_CRT2_MONO 0x00000100L +#define ATOM_S0_CRT2_COLOR 0x00000200L +#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR) + +#define ATOM_S0_TV1_COMPOSITE 0x00000400L +#define ATOM_S0_TV1_SVIDEO 0x00000800L +#define ATOM_S0_TV1_SCART 0x00004000L +#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART) + +#define ATOM_S0_CV 0x00001000L +#define ATOM_S0_CV_DIN 0x00002000L +#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN) + +#define ATOM_S0_DFP1 0x00010000L +#define ATOM_S0_DFP2 0x00020000L +#define ATOM_S0_LCD1 0x00040000L +#define ATOM_S0_LCD2 0x00080000L +#define ATOM_S0_DFP6 0x00100000L +#define ATOM_S0_DFP3 0x00200000L +#define ATOM_S0_DFP4 0x00400000L +#define ATOM_S0_DFP5 0x00800000L + + +#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6 + +#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with + // the FAD/HDP reg access bug. 
Bit is read by DAL, this is obsolete from RV5xx + +#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L +#define ATOM_S0_THERMAL_STATE_SHIFT 26 + +#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L +#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 + +#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 +#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 +#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 +#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4 + +//Byte aligned defintion for BIOS usage +#define ATOM_S0_CRT1_MONOb0 0x01 +#define ATOM_S0_CRT1_COLORb0 0x02 +#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) + +#define ATOM_S0_TV1_COMPOSITEb0 0x04 +#define ATOM_S0_TV1_SVIDEOb0 0x08 +#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0) + +#define ATOM_S0_CVb0 0x10 +#define ATOM_S0_CV_DINb0 0x20 +#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0) + +#define ATOM_S0_CRT2_MONOb1 0x01 +#define ATOM_S0_CRT2_COLORb1 0x02 +#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1) + +#define ATOM_S0_TV1_COMPOSITEb1 0x04 +#define ATOM_S0_TV1_SVIDEOb1 0x08 +#define ATOM_S0_TV1_SCARTb1 0x40 +#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1) + +#define ATOM_S0_CVb1 0x10 +#define ATOM_S0_CV_DINb1 0x20 +#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1) + +#define ATOM_S0_DFP1b2 0x01 +#define ATOM_S0_DFP2b2 0x02 +#define ATOM_S0_LCD1b2 0x04 +#define ATOM_S0_LCD2b2 0x08 +#define ATOM_S0_DFP6b2 0x10 +#define ATOM_S0_DFP3b2 0x20 +#define ATOM_S0_DFP4b2 0x40 +#define ATOM_S0_DFP5b2 0x80 + + +#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C +#define ATOM_S0_THERMAL_STATE_SHIFTb3 2 + +#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0 +#define ATOM_S0_LCD1_SHIFT 18 + +// BIOS_1_SCRATCH Definition +#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL +#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L + +// BIOS_2_SCRATCH Definition +#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL +#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L +#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8 + +#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L +#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26 +#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L + +#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L +#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L + +#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0 +#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1 +#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2 +#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3 +#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30 +#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L + + +//Byte aligned defintion for BIOS usage +#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F +#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF +#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01 + +#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode +#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20 +#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0 + + +// BIOS_3_SCRATCH Definition +#define ATOM_S3_CRT1_ACTIVE 0x00000001L +#define ATOM_S3_LCD1_ACTIVE 0x00000002L +#define ATOM_S3_TV1_ACTIVE 0x00000004L +#define ATOM_S3_DFP1_ACTIVE 0x00000008L +#define ATOM_S3_CRT2_ACTIVE 0x00000010L +#define ATOM_S3_LCD2_ACTIVE 0x00000020L +#define ATOM_S3_DFP6_ACTIVE 0x00000040L +#define ATOM_S3_DFP2_ACTIVE 0x00000080L +#define ATOM_S3_CV_ACTIVE 0x00000100L +#define ATOM_S3_DFP3_ACTIVE 0x00000200L +#define ATOM_S3_DFP4_ACTIVE 0x00000400L +#define ATOM_S3_DFP5_ACTIVE 0x00000800L + + +#define 
ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL + +#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L +#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L + +#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L +#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L +#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L +#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L +#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L +#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L +#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L +#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L +#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L +#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L +#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L +#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L + + +#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L +#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L +//Below two definitions are not supported in pplib, but in the old powerplay in DAL +#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L +#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L + + + +//Byte aligned defintion for BIOS usage +#define ATOM_S3_CRT1_ACTIVEb0 0x01 +#define ATOM_S3_LCD1_ACTIVEb0 0x02 +#define ATOM_S3_TV1_ACTIVEb0 0x04 +#define ATOM_S3_DFP1_ACTIVEb0 0x08 +#define ATOM_S3_CRT2_ACTIVEb0 0x10 +#define ATOM_S3_LCD2_ACTIVEb0 0x20 +#define ATOM_S3_DFP6_ACTIVEb0 0x40 +#define ATOM_S3_DFP2_ACTIVEb0 0x80 +#define ATOM_S3_CV_ACTIVEb1 0x01 +#define ATOM_S3_DFP3_ACTIVEb1 0x02 +#define ATOM_S3_DFP4_ACTIVEb1 0x04 +#define ATOM_S3_DFP5_ACTIVEb1 0x08 + + +#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF + +#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01 +#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02 +#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04 +#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08 +#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10 +#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20 +#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40 +#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80 +#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01 +#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02 +#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04 +#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08 + + +#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF + + +// BIOS_4_SCRATCH Definition +#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL +#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L +#define ATOM_S4_LCD1_REFRESH_SHIFT 8 + +//Byte aligned defintion for BIOS usage +#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF +#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 +#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 + +// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! 
+#define ATOM_S5_DOS_REQ_CRT1b0 0x01 +#define ATOM_S5_DOS_REQ_LCD1b0 0x02 +#define ATOM_S5_DOS_REQ_TV1b0 0x04 +#define ATOM_S5_DOS_REQ_DFP1b0 0x08 +#define ATOM_S5_DOS_REQ_CRT2b0 0x10 +#define ATOM_S5_DOS_REQ_LCD2b0 0x20 +#define ATOM_S5_DOS_REQ_DFP6b0 0x40 +#define ATOM_S5_DOS_REQ_DFP2b0 0x80 +#define ATOM_S5_DOS_REQ_CVb1 0x01 +#define ATOM_S5_DOS_REQ_DFP3b1 0x02 +#define ATOM_S5_DOS_REQ_DFP4b1 0x04 +#define ATOM_S5_DOS_REQ_DFP5b1 0x08 + + +#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF + +#define ATOM_S5_DOS_REQ_CRT1 0x0001 +#define ATOM_S5_DOS_REQ_LCD1 0x0002 +#define ATOM_S5_DOS_REQ_TV1 0x0004 +#define ATOM_S5_DOS_REQ_DFP1 0x0008 +#define ATOM_S5_DOS_REQ_CRT2 0x0010 +#define ATOM_S5_DOS_REQ_LCD2 0x0020 +#define ATOM_S5_DOS_REQ_DFP6 0x0040 +#define ATOM_S5_DOS_REQ_DFP2 0x0080 +#define ATOM_S5_DOS_REQ_CV 0x0100 +#define ATOM_S5_DOS_REQ_DFP3 0x0200 +#define ATOM_S5_DOS_REQ_DFP4 0x0400 +#define ATOM_S5_DOS_REQ_DFP5 0x0800 + +#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 +#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 +#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 +#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 +#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\ + (ATOM_S5_DOS_FORCE_CVb3<<8)) +// BIOS_6_SCRATCH Definition +#define ATOM_S6_DEVICE_CHANGE 0x00000001L +#define ATOM_S6_SCALER_CHANGE 0x00000002L +#define ATOM_S6_LID_CHANGE 0x00000004L +#define ATOM_S6_DOCKING_CHANGE 0x00000008L +#define ATOM_S6_ACC_MODE 0x00000010L +#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L +#define ATOM_S6_LID_STATE 0x00000040L +#define ATOM_S6_DOCK_STATE 0x00000080L +#define ATOM_S6_CRITICAL_STATE 0x00000100L +#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L +#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L +#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L +#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD +#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD + +#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion +#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion + +#define ATOM_S6_ACC_REQ_CRT1 0x00010000L +#define ATOM_S6_ACC_REQ_LCD1 0x00020000L +#define ATOM_S6_ACC_REQ_TV1 0x00040000L +#define ATOM_S6_ACC_REQ_DFP1 0x00080000L +#define ATOM_S6_ACC_REQ_CRT2 0x00100000L +#define ATOM_S6_ACC_REQ_LCD2 0x00200000L +#define ATOM_S6_ACC_REQ_DFP6 0x00400000L +#define ATOM_S6_ACC_REQ_DFP2 0x00800000L +#define ATOM_S6_ACC_REQ_CV 0x01000000L +#define ATOM_S6_ACC_REQ_DFP3 0x02000000L +#define ATOM_S6_ACC_REQ_DFP4 0x04000000L +#define ATOM_S6_ACC_REQ_DFP5 0x08000000L + +#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L +#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L +#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L +#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L +#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L + +//Byte aligned defintion for BIOS usage +#define ATOM_S6_DEVICE_CHANGEb0 0x01 +#define ATOM_S6_SCALER_CHANGEb0 0x02 +#define ATOM_S6_LID_CHANGEb0 0x04 +#define ATOM_S6_DOCKING_CHANGEb0 0x08 +#define ATOM_S6_ACC_MODEb0 0x10 +#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20 +#define ATOM_S6_LID_STATEb0 0x40 +#define ATOM_S6_DOCK_STATEb0 0x80 +#define ATOM_S6_CRITICAL_STATEb1 0x01 +#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 +#define 
ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 +#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 +#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 +#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 + +#define ATOM_S6_ACC_REQ_CRT1b2 0x01 +#define ATOM_S6_ACC_REQ_LCD1b2 0x02 +#define ATOM_S6_ACC_REQ_TV1b2 0x04 +#define ATOM_S6_ACC_REQ_DFP1b2 0x08 +#define ATOM_S6_ACC_REQ_CRT2b2 0x10 +#define ATOM_S6_ACC_REQ_LCD2b2 0x20 +#define ATOM_S6_ACC_REQ_DFP6b2 0x40 +#define ATOM_S6_ACC_REQ_DFP2b2 0x80 +#define ATOM_S6_ACC_REQ_CVb3 0x01 +#define ATOM_S6_ACC_REQ_DFP3b3 0x02 +#define ATOM_S6_ACC_REQ_DFP4b3 0x04 +#define ATOM_S6_ACC_REQ_DFP5b3 0x08 + +#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 +#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 +#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20 +#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40 +#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80 + +#define ATOM_S6_DEVICE_CHANGE_SHIFT 0 +#define ATOM_S6_SCALER_CHANGE_SHIFT 1 +#define ATOM_S6_LID_CHANGE_SHIFT 2 +#define ATOM_S6_DOCKING_CHANGE_SHIFT 3 +#define ATOM_S6_ACC_MODE_SHIFT 4 +#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5 +#define ATOM_S6_LID_STATE_SHIFT 6 +#define ATOM_S6_DOCK_STATE_SHIFT 7 +#define ATOM_S6_CRITICAL_STATE_SHIFT 8 +#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9 +#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10 +#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11 +#define ATOM_S6_REQ_SCALER_SHIFT 12 +#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13 +#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14 +#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15 +#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28 +#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29 +#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 +#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 + +// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! 
+#define ATOM_S7_DOS_MODE_TYPEb0 0x03 +#define ATOM_S7_DOS_MODE_VGAb0 0x00 +#define ATOM_S7_DOS_MODE_VESAb0 0x01 +#define ATOM_S7_DOS_MODE_EXTb0 0x02 +#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C +#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0 +#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01 +#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02 +#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200 +#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF + +#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 + +// BIOS_8_SCRATCH Definition +#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF +#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 + +#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 +#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 + +// BIOS_9_SCRATCH Definition +#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK +#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF +#endif +#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK +#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 +#endif +#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT +#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 +#endif +#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT +#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 +#endif + + +#define ATOM_FLAG_SET 0x20 +#define ATOM_FLAG_CLEAR 0 +#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) +#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) + +#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) + +#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) + +#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) +#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) + +#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) + +#define 
SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) + +#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) +#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET ) +#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) + +/****************************************************************************/ +//Portion II: Definitions only used in Driver +/****************************************************************************/ + +// Macros used by driver + +#ifdef __cplusplus +#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT)) + +#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision)&0x3F) +#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F) +#else // not __cplusplus +#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) + +#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) +#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) +#endif // __cplusplus + +#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION +#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION + +/****************************************************************************/ +//Portion III: Definitions only used in VBIOS +/****************************************************************************/ +#define ATOM_DAC_SRC 0x80 +#define ATOM_SRC_DAC1 0 +#define ATOM_SRC_DAC2 0x80 + + + +typedef struct _MEMORY_PLLINIT_PARAMETERS +{ + ULONG ulTargetMemoryClock; //In 10Khz unit + UCHAR ucAction; //not defined yet + UCHAR ucFbDiv_Hi; //Fbdiv Hi byte + UCHAR ucFbDiv; //FB value + UCHAR ucPostDiv; //Post div +}MEMORY_PLLINIT_PARAMETERS; + +#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS + + +#define GPIO_PIN_WRITE 0x01 +#define GPIO_PIN_READ 0x00 + +typedef struct _GPIO_PIN_CONTROL_PARAMETERS +{ + UCHAR ucGPIO_ID; //return value, read from GPIO pins + UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal needs to be updated + UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask + UCHAR ucAction; //=GPIO_PIN_READ: Read; =GPIO_PIN_WRITE: Write +}GPIO_PIN_CONTROL_PARAMETERS; + +typedef struct _ENABLE_SCALER_PARAMETERS +{ + UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2 + UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION + UCHAR ucTVStandard; // + UCHAR ucPadding[1]; +}ENABLE_SCALER_PARAMETERS; +#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS + +//ucEnable: +#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 +#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 +#define SCALER_ENABLE_2TAP_ALPHA_MODE 2 +#define SCALER_ENABLE_MULTITAP_MODE 3 + +typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS +{ + ULONG
usHWIconHorzVertPosn; // Hardware Icon Horizontal/Vertical position + UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset + UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset + UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE +}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; + +typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION +{ + ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; + ENABLE_CRTC_PARAMETERS sReserved; +}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS +{ + USHORT usHight; // Image Height + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucPadding[3]; +}ENABLE_GRAPH_SURFACE_PARAMETERS; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 +{ + USHORT usHight; // Image Height + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[2]; +}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3 +{ + USHORT usHight; // Image Height + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0. +}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4 +{ + USHORT usHight; // Image Height + USHORT usWidth; // Image Width + USHORT usGraphPitch; + UCHAR ucColorDepth; + UCHAR ucPixelFormat; + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucModeType; + UCHAR ucReserved; +}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4; + +// ucEnable +#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f +#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10 + +typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION +{ + ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; + ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one +}ENABLE_GRAPH_SURFACE_PS_ALLOCATION; + +typedef struct _MEMORY_CLEAN_UP_PARAMETERS +{ + USHORT usMemoryStart; //on 8Kb boundary, offset from memory base address + USHORT usMemorySize; //aligned to 8Kb blocks +}MEMORY_CLEAN_UP_PARAMETERS; + +#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS + +typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS +{ + USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC + USHORT usY_Size; +}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; + +typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2 +{ + union{ + USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC + USHORT usSurface; + }; + USHORT usY_Size; + USHORT usDispXStart; + USHORT usDispYStart; +}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2; + + +typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3 +{ + UCHAR ucLutId; + UCHAR ucAction; + USHORT usLutStartIndex; + USHORT usLutLength; + USHORT usLutOffsetInVram; +}PALETTE_DATA_CONTROL_PARAMETERS_V3; + +// ucAction: +#define PALETTE_DATA_AUTO_FILL 1 +#define PALETTE_DATA_READ 2 +#define PALETTE_DATA_WRITE 3 + + +typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2 +{ + UCHAR ucInterruptId; + UCHAR ucServiceId; + UCHAR ucStatus; + UCHAR ucReserved; +}INTERRUPT_SERVICE_PARAMETER_V2; + +// ucInterruptId +#define HDP1_INTERRUPT_ID 1 +#define HDP2_INTERRUPT_ID 2 +#define HDP3_INTERRUPT_ID 3 +#define HDP4_INTERRUPT_ID 4 +#define HDP5_INTERRUPT_ID 5 +#define HDP6_INTERRUPT_ID 6 +#define SW_INTERRUPT_ID 11 + +// ucAction +#define INTERRUPT_SERVICE_GEN_SW_INT 1
+#define INTERRUPT_SERVICE_GET_STATUS 2 + + // ucStatus +#define INTERRUPT_STATUS__INT_TRIGGER 1 +#define INTERRUPT_STATUS__HPD_HIGH 2 + +typedef struct _EFUSE_INPUT_PARAMETER +{ + USHORT usEfuseIndex; + UCHAR ucBitShift; + UCHAR ucBitLength; +}EFUSE_INPUT_PARAMETER; + +// ReadEfuseValue command table input/output parameter +typedef union _READ_EFUSE_VALUE_PARAMETER +{ + EFUSE_INPUT_PARAMETER sEfuse; + ULONG ulEfuseValue; +}READ_EFUSE_VALUE_PARAMETER; + +typedef struct _INDIRECT_IO_ACCESS +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR IOAccessSequence[256]; +} INDIRECT_IO_ACCESS; + +#define INDIRECT_READ 0x00 +#define INDIRECT_WRITE 0x80 + +#define INDIRECT_IO_MM 0 +#define INDIRECT_IO_PLL 1 +#define INDIRECT_IO_MC 2 +#define INDIRECT_IO_PCIE 3 +#define INDIRECT_IO_PCIEP 4 +#define INDIRECT_IO_NBMISC 5 +#define INDIRECT_IO_SMU 5 + +#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ +#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE +#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ +#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE +#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ +#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE +#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ +#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE +#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ +#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE +#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ +#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE + + +typedef struct _ATOM_OEM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +}ATOM_OEM_INFO; + +typedef struct _ATOM_TV_MODE +{ + UCHAR ucVMode_Num; //Video mode number + UCHAR ucTV_Mode_Num; //Internal TV mode number +}ATOM_TV_MODE; + +typedef struct _ATOM_BIOS_INT_TVSTD_MODE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table + USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table + USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table + USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table + USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table +}ATOM_BIOS_INT_TVSTD_MODE; + + +typedef struct _ATOM_TV_MODE_SCALER_PTR +{ + USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients + USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients + UCHAR ucTV_Mode_Num; +}ATOM_TV_MODE_SCALER_PTR; + +typedef struct _ATOM_STANDARD_VESA_TIMING +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation +}ATOM_STANDARD_VESA_TIMING; + + +typedef struct _ATOM_STD_FORMAT +{ + USHORT usSTD_HDisp; + USHORT usSTD_VDisp; + USHORT usSTD_RefreshRate; + USHORT usReserved; +}ATOM_STD_FORMAT; + +typedef struct _ATOM_VESA_TO_EXTENDED_MODE +{ + USHORT usVESA_ModeNumber; + USHORT usExtendedModeNumber; +}ATOM_VESA_TO_EXTENDED_MODE; + +typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; +}ATOM_VESA_TO_INTENAL_MODE_LUT; + +/*************** ATOM Memory Related Data Structure ***********************/ +typedef struct _ATOM_MEMORY_VENDOR_BLOCK{ + UCHAR ucMemoryType; + UCHAR ucMemoryVendor; + UCHAR ucAdjMCId; + UCHAR ucDynClkId; + ULONG ulDllResetClkRange; +}ATOM_MEMORY_VENDOR_BLOCK; + + +typedef struct 
_ATOM_MEMORY_SETTING_ID_CONFIG{ +#if ATOM_BIG_ENDIAN + ULONG ucMemBlkId:8; + ULONG ulMemClockRange:24; +#else + ULONG ulMemClockRange:24; + ULONG ucMemBlkId:8; +#endif +}ATOM_MEMORY_SETTING_ID_CONFIG; + +typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS +{ + ATOM_MEMORY_SETTING_ID_CONFIG slAccess; + ULONG ulAccess; +}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; + + +typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ + ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; + ULONG aulMemData[1]; +}ATOM_MEMORY_SETTING_DATA_BLOCK; + + +typedef struct _ATOM_INIT_REG_INDEX_FORMAT{ + USHORT usRegIndex; // MC register index + UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf +}ATOM_INIT_REG_INDEX_FORMAT; + + +typedef struct _ATOM_INIT_REG_BLOCK{ + USHORT usRegIndexTblSize; //size of asRegIndexBuf + USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK + ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; + ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; +}ATOM_INIT_REG_BLOCK; + +#define END_OF_REG_INDEX_BLOCK 0x0ffff +#define END_OF_REG_DATA_BLOCK 0x00000000 +#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS +#define CLOCK_RANGE_HIGHEST 0x00ffffff + +#define VALUE_DWORD SIZEOF ULONG +#define VALUE_SAME_AS_ABOVE 0 +#define VALUE_MASK_DWORD 0x84 + +#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) +#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) +#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) +//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code +#define ACCESS_PLACEHOLDER 0x80 + + +typedef struct _ATOM_MC_INIT_PARAM_TABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usAdjustARB_SEQDataOffset; + USHORT usMCInitMemTypeTblOffset; + USHORT usMCInitCommonTblOffset; + USHORT usMCInitPowerDownTblOffset; + ULONG ulARB_SEQDataBuf[32]; + ATOM_INIT_REG_BLOCK asMCInitMemType; + ATOM_INIT_REG_BLOCK asMCInitCommon; +}ATOM_MC_INIT_PARAM_TABLE; + + +typedef struct _ATOM_REG_INIT_SETTING +{ + USHORT usRegIndex; + ULONG ulRegValue; +}ATOM_REG_INIT_SETTING; + +typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulMCUcodeVersion; + ULONG ulMCUcodeRomStartAddr; + ULONG ulMCUcodeLength; + USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings. 
+ USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY regsiter setting +}ATOM_MC_INIT_PARAM_TABLE_V2_1; + + +#define _4Mx16 0x2 +#define _4Mx32 0x3 +#define _8Mx16 0x12 +#define _8Mx32 0x13 +#define _8Mx128 0x15 +#define _16Mx16 0x22 +#define _16Mx32 0x23 +#define _16Mx128 0x25 +#define _32Mx16 0x32 +#define _32Mx32 0x33 +#define _32Mx128 0x35 +#define _64Mx32 0x43 +#define _64Mx8 0x41 +#define _64Mx16 0x42 +#define _128Mx8 0x51 +#define _128Mx16 0x52 +#define _128Mx32 0x53 +#define _256Mx8 0x61 +#define _256Mx16 0x62 +#define _512Mx8 0x71 + + +#define SAMSUNG 0x1 +#define INFINEON 0x2 +#define ELPIDA 0x3 +#define ETRON 0x4 +#define NANYA 0x5 +#define HYNIX 0x6 +#define MOSEL 0x7 +#define WINBOND 0x8 +#define ESMT 0x9 +#define MICRON 0xF + +#define QIMONDA INFINEON +#define PROMOS MOSEL +#define KRETON INFINEON +#define ELIXIR NANYA +#define MEZZA ELPIDA + + +/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// + +#define UCODE_ROM_START_ADDRESS 0x1b800 +#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode + +//uCode block header for reference + +typedef struct _MCuCodeHeader +{ + ULONG ulSignature; + UCHAR ucRevision; + UCHAR ucChecksum; + UCHAR ucReserved1; + UCHAR ucReserved2; + USHORT usParametersLength; + USHORT usUCodeLength; + USHORT usReserved1; + USHORT usReserved2; +} MCuCodeHeader; + +////////////////////////////////////////////////////////////////////////////////// + +#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16 + +#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF +typedef struct _ATOM_VRAM_MODULE_V1 +{ + ULONG ulReserved; + USHORT usEMRSValue; + USHORT usMRSValue; + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; + UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender + UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
+  UCHAR   ucRow;             // Number of rows, in power of 2;
+  UCHAR   ucColumn;          // Number of columns, in power of 2;
+  UCHAR   ucBank;            // Number of banks;
+  UCHAR   ucRank;            // Number of ranks, in power of 2
+  UCHAR   ucChannelNum;      // Number of channels;
+  UCHAR   ucChannelConfig;   // [3:0]=Indication of what channel combination; [4:7]=Channel bit width, in number of 2
+  UCHAR   ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR   ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR   ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+  ULONG   ulReserved;
+  ULONG   ulFlags;           // To enable/disable functionalities based on memory type
+  ULONG   ulEngineClock;     // Override of default engine clock for particular memory type
+  ULONG   ulMemoryClock;     // Override of default memory clock for particular memory type
+  USHORT  usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT  usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  USHORT  usEMRSValue;
+  USHORT  usMRSValue;
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; [3:0] - must not be used for now;
+  UCHAR   ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed
+  UCHAR   ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR   ucRow;             // Number of rows, in power of 2;
+  UCHAR   ucColumn;          // Number of columns, in power of 2;
+  UCHAR   ucBank;            // Number of banks;
+  UCHAR   ucRank;            // Number of ranks, in power of 2
+  UCHAR   ucChannelNum;      // Number of channels;
+  UCHAR   ucChannelConfig;   // [3:0]=Indication of what channel combination; [4:7]=Channel bit width, in number of 2
+  UCHAR   ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR   ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR   ucRefreshRateFactor;
+  UCHAR   ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT
+{
+  ULONG   ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+  union{
+    USHORT  usMRS;           // mode register
+    USHORT  usDDR3_MR0;
+  };
+  union{
+    USHORT  usEMRS;          // extended mode register
+    USHORT  usDDR3_MR1;
+  };
+  UCHAR   ucCL;              // CAS latency
+  UCHAR   ucWL;              // WRITE Latency
+  UCHAR   uctRAS;            // tRAS
+  UCHAR   uctRC;             // tRC
+  UCHAR   uctRFC;            // tRFC
+  UCHAR   uctRCDR;           // tRCDR
+  UCHAR   uctRCDW;           // tRCDW
+  UCHAR   uctRP;             // tRP
+  UCHAR   uctRRD;            // tRRD
+  UCHAR   uctWR;             // tWR
+  UCHAR   uctWTR;            // tWTR
+  UCHAR   uctPDIX;           // tPDIX
+  UCHAR   uctFAW;            // tFAW
+  UCHAR   uctAOND;           // tAOND
+  union
+  {
+    struct {
+      UCHAR  ucflag;         // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+      UCHAR  ucReserved;
+    };
+    USHORT  usDDR3_MR2;
+  };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+  ULONG   ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+  USHORT  usMRS;             // mode register
+  USHORT  usEMRS;            // extended mode register
+  UCHAR   ucCL;              // CAS latency
+  UCHAR   ucWL;              // WRITE Latency
+  UCHAR   uctRAS;            // tRAS
+  UCHAR   uctRC;             // tRC
+  UCHAR   uctRFC;            // tRFC
+  UCHAR   uctRCDR;           // tRCDR
+  UCHAR   uctRCDW;           // tRCDW
+  UCHAR   uctRP;             // tRP
+  UCHAR   uctRRD;            // tRRD
+  UCHAR   uctWR;             // tWR
+  UCHAR   uctWTR;            // tWTR
+  UCHAR   uctPDIX;           // tPDIX
+  UCHAR   uctFAW;            // tFAW
+  UCHAR   uctAOND;           // tAOND
+  UCHAR   ucflag;            // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+  UCHAR   uctCCDL;           //
+  UCHAR   uctCRCRL;          //
+  UCHAR   uctCRCWL;          //
+  UCHAR   uctCKE;            //
+  UCHAR   uctCKRSE;          //
+  UCHAR   uctCKRSX;          //
+  UCHAR   uctFAW32;          //
+  UCHAR   ucMR5lo;           //
+  UCHAR   ucMR5hi;           //
+  UCHAR   ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+  ULONG   ulClkRange;        // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+  USHORT  usMRS;             // mode register
+  USHORT  usEMRS;            // extended mode register
+  UCHAR   ucCL;              // CAS latency
+  UCHAR   ucWL;              // WRITE Latency
+  UCHAR   uctRAS;            // tRAS
+  UCHAR   uctRC;             // tRC
+  UCHAR   uctRFC;            // tRFC
+  UCHAR   uctRCDR;           // tRCDR
+  UCHAR   uctRCDW;           // tRCDW
+  UCHAR   uctRP;             // tRP
+  UCHAR   uctRRD;            // tRRD
+  UCHAR   uctWR;             // tWR
+  UCHAR   uctWTR;            // tWTR
+  UCHAR   uctPDIX;           // tPDIX
+  UCHAR   uctFAW;            // tFAW
+  UCHAR   uctAOND;           // tAOND
+  UCHAR   ucflag;            // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+  UCHAR   uctCCDL;           //
+  UCHAR   uctCRCRL;          //
+  UCHAR   uctCRCWL;          //
+  UCHAR   uctCKE;            //
+  UCHAR   uctCKRSE;          //
+  UCHAR   uctCKRSX;          //
+  UCHAR   uctFAW32;          //
+  UCHAR   ucMR4lo;           //
+  UCHAR   ucMR4hi;           //
+  UCHAR   ucMR5lo;           //
+  UCHAR   ucMR5hi;           //
+  UCHAR   ucTerminator;
+  UCHAR   ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+
+typedef struct _ATOM_MEMORY_FORMAT
+{
+  ULONG   ulDllDisClock;     // memory DLL will be disabled when target memory clock is below this clock
+  union{
+    USHORT  usEMRS2Value;    // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_Reserved; // Not used for DDR3 memory
+  };
+  union{
+    USHORT  usEMRS3Value;    // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_MR3;      // Used for DDR3 memory
+  };
+  UCHAR   ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; [3:0] - must not be used for now;
+  UCHAR   ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed
+  UCHAR   ucRow;             // Number of rows, in power of 2;
+  UCHAR   ucColumn;          // Number of columns, in power of 2;
+  UCHAR   ucBank;            // Number of banks;
+  UCHAR   ucRank;            // Number of ranks, in power of 2
+  UCHAR   ucBurstSize;       // burst size, 0= burst size=4  1= burst size=8
+  UCHAR   ucDllDisBit;       // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+  UCHAR   ucRefreshRateFactor; // memory refresh rate in unit of ms
+  UCHAR   ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucPreamble;        // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemAttrib;       // Memory Device Attribute, like RDBI/WDBI etc
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5]; // Memory Timing block sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+  ULONG   ulChannelMapCfg;   // board dependent parameter: Channel combination
+  USHORT  usSize;            // size of ATOM_VRAM_MODULE_V3
+  USHORT  usDefaultMVDDQ;    // board dependent parameter: Default Memory Core Voltage
+  USHORT  usDefaultMVDDC;    // board dependent parameter: Default Memory IO Voltage
+  UCHAR   ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucChannelNum;      // board dependent parameter: Number of channels;
+  UCHAR   ucChannelSize;     // board dependent parameter: 32bit or 64bit
+  UCHAR   ucVREFI;           // board dependent parameter: EXT or INT +160mv to -140mv
+  UCHAR   ucNPL_RT;          // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR   ucFlag;            // To enable/disable functionalities based on memory type
+  ATOM_MEMORY_FORMAT  asMemory; // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK       0x0f
+#define BATTERY_ODT_MASK  0xc0
+
+#define ATOM_VRAM_MODULE  ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+  ULONG   ulChannelMapCfg;   // board dependent parameter: Channel combination
+  USHORT  usModuleSize;      // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                             // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;      // Number of channels present in this module config
+  UCHAR   ucChannelWidth;    // 0 - 32 bits; 1 - 64 bits
+  UCHAR   ucDensity;         // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucFlag;            // To enable/disable functionalities based on memory type
+  UCHAR   ucMisc;            // bit0: 0 - single rank; 1 - dual rank;  bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR   ucVREFI;           // board dependent parameter
+  UCHAR   ucNPL_RT;          // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+  UCHAR   ucPreamble;        // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;      // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + union{ + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_Reserved; + }; + union{ + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_MR3; // Used for DDR3 memory + }; + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucReserved2[2]; + ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V4; + +#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3 +#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1 +#define VRAM_MODULE_V4_MISC_BL_MASK 0x4 +#define VRAM_MODULE_V4_MISC_BL8 0x4 +#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10 + +typedef struct _ATOM_VRAM_MODULE_V5 +{ + ULONG ulChannelMapCfg; // board dependent parameter: Channel combination + USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE + USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! + // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; + UCHAR ucChannelNum; // Number of channels present in this module config + UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucFlag; // To enable/disable functionalities based on memory type + UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 + UCHAR ucVREFI; // board dependent parameter + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V5; + + +typedef struct _ATOM_VRAM_MODULE_V6 +{ + ULONG ulChannelMapCfg; // board dependent parameter: Channel combination + USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE + USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! + // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; + UCHAR ucChannelNum; // Number of channels present in this module config + UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucFlag; // To enable/disable functionalities based on memory type + UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 + UCHAR ucVREFI; // board dependent parameter + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V6; + +typedef struct _ATOM_VRAM_MODULE_V7 +{ +// Design Specific Values + ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP + USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 + USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usEnableChannels; // bit vector which indicate which channels are enabled + UCHAR ucExtMemoryID; // Current memory module ID + UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 + UCHAR ucChannelNum; // Number of mem. channels supported in this module + UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucReserve; // In MC7x, the lower 4 bits are used as bit8-11 of memory size. In other MC code, it's not used. + UCHAR ucMisc; // RANK_OF_THISMEMORY etc. + UCHAR ucVREFI; // Not used. + UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2. + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + USHORT usSEQSettingOffset; + UCHAR ucReserved; +// Memory Module specific values + USHORT usEMRS2Value; // EMRS2/MR2 Value. + USHORT usEMRS3Value; // EMRS3/MR3 Value. + UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + char strMemPNString[20]; // part number end with '0'. +}ATOM_VRAM_MODULE_V7; + + +typedef struct _ATOM_VRAM_MODULE_V8 +{ +// Design Specific Values + ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP + USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7 + USHORT usMcRamCfg; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usEnableChannels; // bit vector which indicate which channels are enabled + UCHAR ucExtMemoryID; // Current memory module ID + UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5 + UCHAR ucChannelNum; // Number of mem. channels supported in this module + UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucBankCol; // bit[3:2]= BANK ( =2:16bank, =1:8bank, =0:4bank ) bit[1:0]=Col ( =2: 10 bit, =1:9bit, =0:8bit ) + UCHAR ucMisc; // RANK_OF_THISMEMORY etc. + UCHAR ucVREFI; // Not used. 
+ USHORT usReserved; // Not used + USHORT usMemorySize; // Total memory size in unit of MB for CONFIG_MEMSIZE zeros + UCHAR ucMcTunningSetId; // MC phy registers set per. + UCHAR ucRowNum; +// Memory Module specific values + USHORT usEMRS2Value; // EMRS2/MR2 Value. + USHORT usEMRS3Value; // EMRS3/MR3 Value. + UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + + ULONG ulChannelMapCfg1; // channel mapping for channel8~15 + ULONG ulBankMapCfg; + ULONG ulReserved; + char strMemPNString[20]; // part number end with '0'. +}ATOM_VRAM_MODULE_V8; + + +typedef struct _ATOM_VRAM_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; +}ATOM_VRAM_INFO_V2; + +typedef struct _ATOM_VRAM_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usRerseved; + UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; + ATOM_INIT_REG_BLOCK asMemPatch; // for allocation + +}ATOM_VRAM_INFO_V3; + +#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3 + +typedef struct _ATOM_VRAM_INFO_V4 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usRerseved; + UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 + ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... 
DQ7:[23:21] + UCHAR ucReservde[4]; + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; + ATOM_INIT_REG_BLOCK asMemPatch; // for allocation +}ATOM_VRAM_INFO_V4; + +typedef struct _ATOM_VRAM_INFO_HEADER_V2_1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings + USHORT usReserved[3]; + UCHAR ucNumOfVRAMModule; // indicate number of VRAM module + UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list + UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version + UCHAR ucReserved; + ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; +}ATOM_VRAM_INFO_HEADER_V2_1; + +typedef struct _ATOM_VRAM_INFO_HEADER_V2_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usMcAdjustPerTileTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings + USHORT usMcPhyInitTableOffset; // offset of ATOM_INIT_REG_BLOCK structure for MC phy init set + USHORT usDramDataRemapTblOffset; // offset of ATOM_DRAM_DATA_REMAP array to indicate DRAM data lane to GPU mapping + USHORT usReserved1; + UCHAR ucNumOfVRAMModule; // indicate number of VRAM module + UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list + UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version + UCHAR ucMcPhyTileNum; // indicate the MCD tile number which use in DramDataRemapTbl and usMcAdjustPerTileTblOffset + ATOM_VRAM_MODULE_V8 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; +}ATOM_VRAM_INFO_HEADER_V2_2; + + +typedef struct _ATOM_DRAM_DATA_REMAP +{ + UCHAR ucByteRemapCh0; + UCHAR ucByteRemapCh1; + ULONG ulByte0BitRemapCh0; + ULONG ulByte1BitRemapCh0; + ULONG ulByte2BitRemapCh0; + ULONG ulByte3BitRemapCh0; + ULONG ulByte0BitRemapCh1; + ULONG ulByte1BitRemapCh1; + ULONG ulByte2BitRemapCh1; + ULONG ulByte3BitRemapCh1; +}ATOM_DRAM_DATA_REMAP; + +typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator +}ATOM_VRAM_GPIO_DETECTION_INFO; + + +typedef struct _ATOM_MEMORY_TRAINING_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucTrainingLoop; + UCHAR ucReserved[3]; + ATOM_INIT_REG_BLOCK asMemTrainingSetting; +}ATOM_MEMORY_TRAINING_INFO; + + +typedef struct SW_I2C_CNTL_DATA_PARAMETERS +{ + UCHAR ucControl; + UCHAR ucData; + UCHAR ucSatus; + UCHAR ucTemp; +} SW_I2C_CNTL_DATA_PARAMETERS; + +#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS + +typedef struct _SW_I2C_IO_DATA_PARAMETERS +{ + USHORT GPIO_Info; + UCHAR ucAct; + UCHAR ucData; + } SW_I2C_IO_DATA_PARAMETERS; + +#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS + +/****************************SW I2C CNTL DEFINITIONS**********************/ +#define SW_I2C_IO_RESET 0 +#define SW_I2C_IO_GET 1 +#define 
SW_I2C_IO_DRIVE 2 +#define SW_I2C_IO_SET 3 +#define SW_I2C_IO_START 4 + +#define SW_I2C_IO_CLOCK 0 +#define SW_I2C_IO_DATA 0x80 + +#define SW_I2C_IO_ZERO 0 +#define SW_I2C_IO_ONE 0x100 + +#define SW_I2C_CNTL_READ 0 +#define SW_I2C_CNTL_WRITE 1 +#define SW_I2C_CNTL_START 2 +#define SW_I2C_CNTL_STOP 3 +#define SW_I2C_CNTL_OPEN 4 +#define SW_I2C_CNTL_CLOSE 5 +#define SW_I2C_CNTL_WRITE1BIT 6 + +//==============================VESA definition Portion=============================== +#define VESA_OEM_PRODUCT_REV '01.00' +#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support +#define VESA_MODE_WIN_ATTRIBUTE 7 +#define VESA_WIN_SIZE 64 + +typedef struct _PTR_32_BIT_STRUCTURE +{ + USHORT Offset16; + USHORT Segment16; +} PTR_32_BIT_STRUCTURE; + +typedef union _PTR_32_BIT_UNION +{ + PTR_32_BIT_STRUCTURE SegmentOffset; + ULONG Ptr32_Bit; +} PTR_32_BIT_UNION; + +typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE +{ + UCHAR VbeSignature[4]; + USHORT VbeVersion; + PTR_32_BIT_UNION OemStringPtr; + UCHAR Capabilities[4]; + PTR_32_BIT_UNION VideoModePtr; + USHORT TotalMemory; +} VBE_1_2_INFO_BLOCK_UPDATABLE; + + +typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE +{ + VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; + USHORT OemSoftRev; + PTR_32_BIT_UNION OemVendorNamePtr; + PTR_32_BIT_UNION OemProductNamePtr; + PTR_32_BIT_UNION OemProductRevPtr; +} VBE_2_0_INFO_BLOCK_UPDATABLE; + +typedef union _VBE_VERSION_UNION +{ + VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; + VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; +} VBE_VERSION_UNION; + +typedef struct _VBE_INFO_BLOCK +{ + VBE_VERSION_UNION UpdatableVBE_Info; + UCHAR Reserved[222]; + UCHAR OemData[256]; +} VBE_INFO_BLOCK; + +typedef struct _VBE_FP_INFO +{ + USHORT HSize; + USHORT VSize; + USHORT FPType; + UCHAR RedBPP; + UCHAR GreenBPP; + UCHAR BlueBPP; + UCHAR ReservedBPP; + ULONG RsvdOffScrnMemSize; + ULONG RsvdOffScrnMEmPtr; + UCHAR Reserved[14]; +} VBE_FP_INFO; + +typedef struct _VESA_MODE_INFO_BLOCK +{ +// Mandatory information for all VBE revisions + USHORT ModeAttributes; // dw ? ; mode attributes + UCHAR WinAAttributes; // db ? ; window A attributes + UCHAR WinBAttributes; // db ? ; window B attributes + USHORT WinGranularity; // dw ? ; window granularity + USHORT WinSize; // dw ? ; window size + USHORT WinASegment; // dw ? ; window A start segment + USHORT WinBSegment; // dw ? ; window B start segment + ULONG WinFuncPtr; // dd ? ; real mode pointer to window function + USHORT BytesPerScanLine;// dw ? ; bytes per scan line + +//; Mandatory information for VBE 1.2 and above + USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters + USHORT YResolution; // dw ? ; vertical resolution in pixels or characters + UCHAR XCharSize; // db ? ; character cell width in pixels + UCHAR YCharSize; // db ? ; character cell height in pixels + UCHAR NumberOfPlanes; // db ? ; number of memory planes + UCHAR BitsPerPixel; // db ? ; bits per pixel + UCHAR NumberOfBanks; // db ? ; number of banks + UCHAR MemoryModel; // db ? ; memory model type + UCHAR BankSize; // db ? ; bank size in KB + UCHAR NumberOfImagePages;// db ? ; number of images + UCHAR ReservedForPageFunction;//db 1 ; reserved for page function + +//; Direct Color fields(required for direct/6 and YUV/7 memory models) + UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits + UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask + UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits + UCHAR GreenFieldPosition; // db ? 
; bit position of lsb of green mask + UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits + UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask + UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits + UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask + UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes + +//; Mandatory information for VBE 2.0 and above + ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer + ULONG Reserved_1; // dd 0 ; reserved - always set to 0 + USHORT Reserved_2; // dw 0 ; reserved - always set to 0 + +//; Mandatory information for VBE 3.0 and above + USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes + UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes + UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes + UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes) + UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes) + UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes) + UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes) + UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes) + UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes) + UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes) + UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes) + ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode + UCHAR Reserved; // db 190 dup (0) +} VESA_MODE_INFO_BLOCK; + +// BIOS function CALLS +#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code +#define ATOM_BIOS_FUNCTION_COP_MODE 0x00 +#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04 +#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05 +#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06 +#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B +#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E +#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F +#define ATOM_BIOS_FUNCTION_STV_STD 0x16 +#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17 +#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18 + +#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 +#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 +#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 +#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A +#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B +#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80 +#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80 + +#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D +#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E +#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F +#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03 +#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7 +#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state +#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state +#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85 +#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89 +#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported + + +#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS +#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01 +#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02 
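Read together, ATOM_BIOS_FUNCTION_VESA_DPMS and the sub-function defines above follow the classic INT 10h register convention: AX carries the 0x4F10 function code, BL the sub-function, and the ATOM_PARAMETER_VESA_DPMS_* values that follow are already shifted into the BH byte. A minimal sketch of how a caller could pack BX under that assumption; the helper name atom_dpms_pack_bx is illustrative and not part of this header:

#include <stdint.h>

/* Illustrative sketch only: compose the BX register for a VESA DPMS call.
 * The sub-function (ATOM_SUB_FUNCTION_SET_DPMS / _GET_DPMS) occupies BL;
 * the ATOM_PARAMETER_VESA_DPMS_* values below are pre-shifted into the
 * BH position, so a plain OR combines the two halves. */
static inline uint16_t atom_dpms_pack_bx(uint16_t sub_function, uint16_t bh_parameter)
{
	return sub_function | bh_parameter;
}

/* Example: "set DPMS standby" would use AX = ATOM_BIOS_FUNCTION_VESA_DPMS
 * (0x4F10) and BX = atom_dpms_pack_bx(0x0001, 0x0100) = 0x0101. */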
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON. +#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY +#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND +#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF +#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) + +#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L +#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L +#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL + +// structure used for VBIOS only + +//DispOutInfoTable +typedef struct _ASIC_TRANSMITTER_INFO +{ + USHORT usTransmitterObjId; + USHORT usSupportDevice; + UCHAR ucTransmitterCmdTblId; + UCHAR ucConfig; + UCHAR ucEncoderID; //available 1st encoder ( default ) + UCHAR ucOptionEncoderID; //available 2nd encoder ( optional ) + UCHAR uc2ndEncoderID; + UCHAR ucReserved; +}ASIC_TRANSMITTER_INFO; + +#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01 +#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84 + +typedef struct _ASIC_ENCODER_INFO +{ + UCHAR ucEncoderID; + UCHAR ucEncoderConfig; + USHORT usEncoderCmdTblId; +}ASIC_ENCODER_INFO; + +typedef struct _ATOM_DISP_OUT_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT ptrTransmitterInfo; + USHORT ptrEncoderInfo; + ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; + ASIC_ENCODER_INFO asEncoderInfo[1]; +}ATOM_DISP_OUT_INFO; + + +typedef struct _ATOM_DISP_OUT_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT ptrTransmitterInfo; + USHORT ptrEncoderInfo; + USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. + ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; + ASIC_ENCODER_INFO asEncoderInfo[1]; +}ATOM_DISP_OUT_INFO_V2; + + +typedef struct _ATOM_DISP_CLOCK_ID { + UCHAR ucPpllId; + UCHAR ucPpllAttribute; +}ATOM_DISP_CLOCK_ID; + +// ucPpllAttribute +#define CLOCK_SOURCE_SHAREABLE 0x01 +#define CLOCK_SOURCE_DP_MODE 0x02 +#define CLOCK_SOURCE_NONE_DP_MODE 0x04 + +//DispOutInfoTable +typedef struct _ASIC_TRANSMITTER_INFO_V2 +{ + USHORT usTransmitterObjId; + USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object + UCHAR ucTransmitterCmdTblId; + UCHAR ucConfig; + UCHAR ucEncoderID; // available 1st encoder ( default ) + UCHAR ucOptionEncoderID; // available 2nd encoder ( optional ) + UCHAR uc2ndEncoderID; + UCHAR ucReserved; +}ASIC_TRANSMITTER_INFO_V2; + +typedef struct _ATOM_DISP_OUT_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT ptrTransmitterInfo; + USHORT ptrEncoderInfo; + USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. 
+  USHORT usReserved;
+  UCHAR  ucDCERevision;
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;  // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucDispCaps;
+  UCHAR  ucReserved[2];
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];  // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+//ucDispCaps
+#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL        0x01
+#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED  0x02
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+  USHORT  lpAuxRequest;
+  USHORT  lpDataOut;
+  UCHAR   ucChannelID;
+  union
+  {
+    UCHAR ucReplyStatus;
+    UCHAR ucDelay;
+  };
+  UCHAR   ucDataOutLen;
+  UCHAR   ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+  USHORT  lpAuxRequest;
+  USHORT  lpDataOut;
+  UCHAR   ucChannelID;
+  union
+  {
+    UCHAR ucReplyStatus;
+    UCHAR ucDelay;
+  };
+  UCHAR   ucDataOutLen;
+  UCHAR   ucHPD_ID;  //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+  USHORT ucLinkClock;
+  union
+  {
+    UCHAR ucConfig;  // for DP training command
+    UCHAR ucI2cId;   // use for GET_SINK_TYPE command
+  };
+  UCHAR ucAction;
+  UCHAR ucStatus;
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE  0x01
+
+#define DP_ENCODER_SERVICE_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+  USHORT usExtEncoderObjId;  // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;         // Input and output parameters.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION + UCHAR ucReserved[2]; +}DP_ENCODER_SERVICE_PARAMETERS_V2; + +typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2 +{ + DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam; + PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam; +}DP_ENCODER_SERVICE_PS_ALLOCATION_V2; + +// ucAction +#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01 +#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02 + + +// DP_TRAINING_TABLE +#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR +#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) +#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 ) +#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 ) +#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) +#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) +#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) +#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60) +#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) +#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) +#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) +#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) +#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84) + + +typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS +{ + UCHAR ucI2CSpeed; + union + { + UCHAR ucRegIndex; + UCHAR ucStatus; + }; + USHORT lpI2CDataOut; + UCHAR ucFlag; + UCHAR ucTransBytes; + UCHAR ucSlaveAddr; + UCHAR ucLineNumber; +}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; + +#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS + +//ucFlag +#define HW_I2C_WRITE 1 +#define HW_I2C_READ 0 +#define I2C_2BYTE_ADDR 0x02 + +/****************************************************************************/ +// Structures used by HW_Misc_OperationTable +/****************************************************************************/ +typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 +{ + UCHAR ucCmd; // Input: To tell which action to take + UCHAR ucReserved[3]; + ULONG ulReserved; +}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; + +typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 +{ + UCHAR ucReturnCode; // Output: Return value base on action was taken + UCHAR ucReserved[3]; + ULONG ulReserved; +}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1; + +// Actions code +#define ATOM_GET_SDI_SUPPORT 0xF0 + +// Return code +#define ATOM_UNKNOWN_CMD 0 +#define ATOM_FEATURE_NOT_SUPPORTED 1 +#define ATOM_FEATURE_SUPPORTED 2 + +typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION +{ + ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output; + PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved; +}ATOM_HW_MISC_OPERATION_PS_ALLOCATION; + +/****************************************************************************/ + +typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 +{ + UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ... 
+ UCHAR ucReserved[3]; +}SET_HWBLOCK_INSTANCE_PARAMETER_V2; + +#define HWBLKINST_INSTANCE_MASK 0x07 +#define HWBLKINST_HWBLK_MASK 0xF0 +#define HWBLKINST_HWBLK_SHIFT 0x04 + +//ucHWBlock +#define SELECT_DISP_ENGINE 0 +#define SELECT_DISP_PLL 1 +#define SELECT_DCIO_UNIPHY_LINK0 2 +#define SELECT_DCIO_UNIPHY_LINK1 3 +#define SELECT_DCIO_IMPCAL 4 +#define SELECT_DCIO_DIG 6 +#define SELECT_CRTC_PIXEL_RATE 7 +#define SELECT_VGA_BLK 8 + +// DIGTransmitterInfoTable structure used to program UNIPHY settings +typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock + USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info + USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range + USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info + USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings +}DIG_TRANSMITTER_INFO_HEADER_V3_1; + +typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock + USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info + USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range + USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info + USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings + USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info + USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings +}DIG_TRANSMITTER_INFO_HEADER_V3_2; + + +typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_3{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock + USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info + USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range + USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info + USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings + USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info + USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings + USHORT usEDPVsLegacyModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Legacy Mode Voltage Swing and Pre-Emphasis for each Link clock + USHORT useDPVsLowVdiffModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP Low VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock + USHORT useDPVsHighVdiffModeOffset; // offset of PHY_ANALOG_SETTING_INFO * with eDP High VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock + USHORT useDPVsStretchModeOffset; // offset of 
PHY_ANALOG_SETTING_INFO * with eDP Stretch Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsSingleVdiffModeOffset;   // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vdiff Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsVariablePremModeOffset;  // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vdiff+Variable PreEmphasis Voltage Swing and Pre-Emphasis for each Link clock
+}DIG_TRANSMITTER_INFO_HEADER_V3_3;
+
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG ulCondition;
+  ULONG ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_VAL_V2{
+  ULONG ulCondition;
+  UCHAR ucCondition2;
+  ULONG ulRegVal;
+}PHY_CONDITION_REG_VAL_V2;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_CONDITION_REG_INFO_V2{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL_V2 asRegVal[1];
+}PHY_CONDITION_REG_INFO_V2;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO_V2{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO_V2;
+
+
+typedef struct _GFX_HAVESTING_PARAMETERS {
+  UCHAR ucGfxBlkId;          //GFX blk id to be harvested, like CU, RB or PRIM
+  UCHAR ucReserved;          //reserved
+  UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array
+  UCHAR ucMaxUnitNumPerSH;   //max CU/RB/PRIM number per shader array
+} GFX_HAVESTING_PARAMETERS;
+
+//ucGfxBlkId
+#define GFX_HARVESTING_CU_ID    0
+#define GFX_HARVESTING_RB_ID    1
+#define GFX_HARVESTING_PRIM_ID  2
+
+
+typedef struct _VBIOS_ROM_HEADER{
+  UCHAR  PciRomSignature[2];
+  UCHAR  ucPciRomSizeIn512bytes;
+  UCHAR  ucJumpCoreMainInitBIOS;
+  USHORT usLabelCoreMainInitBIOS;
+  UCHAR  PciReservedSpace[18];
+  USHORT usPciDataStructureOffset;
+  UCHAR  Rsvd1d_1a[4];
+  char   strIbm[3];
+  UCHAR  CheckSum[14];
+  UCHAR  ucBiosMsgNumber;
+  char   str761295520[16];
+  USHORT usLabelCoreVPOSTNoMode;
+  USHORT usSpecialPostOffset;
+  UCHAR  ucSpeicalPostImageSizeIn512Bytes;
+  UCHAR  Rsved47_45[3];
+  USHORT usROM_HeaderInformationTableOffset;
+  UCHAR  Rsved4f_4a[6];
+  char   strBuildTimeStamp[20];
+  UCHAR  ucJumpCoreXFuncFarHandler;
+  USHORT usCoreXFuncFarHandlerOffset;
+  UCHAR  ucRsved67;
+  UCHAR  ucJumpCoreVFuncFarHandler;
+  USHORT usCoreVFuncFarHandlerOffset;
+  UCHAR  Rsved6d_6b[3];
+  USHORT usATOM_BIOS_MESSAGE_Offset;
+}VBIOS_ROM_HEADER;
+
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
+#define MC_MISC0__MEMORY_TYPE__HBM    0x60000000
+#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
+
+#define ATOM_MEM_TYPE_DDR_STRING    "DDR"
+#define ATOM_MEM_TYPE_DDR2_STRING   "DDR2"
+#define ATOM_MEM_TYPE_GDDR3_STRING  "GDDR3"
+#define ATOM_MEM_TYPE_GDDR4_STRING  "GDDR4"
+#define ATOM_MEM_TYPE_GDDR5_STRING  "GDDR5"
+#define ATOM_MEM_TYPE_HBM_STRING    "HBM"
+#define ATOM_MEM_TYPE_DDR3_STRING   "DDR3"
+
+/****************************************************************************/
+//Portion VII: Definitions that are obsolete
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usMaxFrequency;  // in 10kHz unit
+  USHORT usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct _COMPASSIONATE_DATA
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+
+  //============================== DAC1 portion
+  UCHAR  ucDAC1_BG_Adjustment;
+  UCHAR  ucDAC1_DAC_Adjustment;
+  USHORT usDAC1_FORCE_Data;
+  //============================== DAC2 portion
+  UCHAR  ucDAC2_CRT2_BG_Adjustment;
+  UCHAR  ucDAC2_CRT2_DAC_Adjustment;
+  USHORT usDAC2_CRT2_FORCE_Data;
+  USHORT usDAC2_CRT2_MUX_RegisterIndex;
+  UCHAR  ucDAC2_CRT2_MUX_RegisterInfo;  //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR  ucDAC2_NTSC_BG_Adjustment;
+  UCHAR  ucDAC2_NTSC_DAC_Adjustment;
+  USHORT usDAC2_TV1_FORCE_Data;
+  USHORT usDAC2_TV1_MUX_RegisterIndex;
+  UCHAR  ucDAC2_TV1_MUX_RegisterInfo;   //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR  ucDAC2_CV_BG_Adjustment;
+  UCHAR  ucDAC2_CV_DAC_Adjustment;
+  USHORT usDAC2_CV_FORCE_Data;
+  USHORT usDAC2_CV_MUX_RegisterIndex;
+  UCHAR  ucDAC2_CV_MUX_RegisterInfo;    //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR  ucDAC2_PAL_BG_Adjustment;
+  UCHAR  ucDAC2_PAL_DAC_Adjustment;
+  USHORT usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+//  ucConnectInfo:
+//    [7:4] - connector type
+//      = 1   - VGA connector
+//      = 2   - DVI-I
+//      = 3   - DVI-D
+//      = 4   - DVI-A
+//      = 5   - SVIDEO
+//      = 6   - COMPOSITE
+//      = 7   - LVDS
+//      = 8   - DIGITAL LINK
+//      = 9   - SCART
+//      = 0xA - HDMI_type A
+//      = 0xB - HDMI_type B
+//      = 0xE - Special case1 (DVI+DIN)
+//      Others=TBD
+//    [3:0] - DAC Associated
+//      = 0   - no DAC
+//      = 1   - DACA
+//      = 2   - DACB
+//      = 3   - External DAC
+//      Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR bfConnectorType:4;
+  UCHAR bfAssociatedDAC:4;
+#else
+  UCHAR bfAssociatedDAC:4;
+  UCHAR bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+  ATOM_CONNECTOR_INFO sbfAccess;
+  UCHAR ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED  0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+  UCHAR ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+
USHORT usDeviceSupport; + ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; + ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; +}ATOM_SUPPORTED_DEVICES_INFO_2; + +typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; + ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; +}ATOM_SUPPORTED_DEVICES_INFO_2d1; + +#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 + + + +typedef struct _ATOM_MISC_CONTROL_INFO +{ + USHORT usFrequency; + UCHAR ucPLL_ChargePump; // PLL charge-pump gain control + UCHAR ucPLL_DutyCycle; // PLL duty cycle control + UCHAR ucPLL_VCO_Gain; // PLL VCO gain control + UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control +}ATOM_MISC_CONTROL_INFO; + + +#define ATOM_MAX_MISC_INFO 4 + +typedef struct _ATOM_TMDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMaxFrequency; // in 10Khz + ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; +}ATOM_TMDS_INFO; + + +typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE +{ + UCHAR ucTVStandard; //Same as TV standards defined above, + UCHAR ucPadding[1]; +}ATOM_ENCODER_ANALOG_ATTRIBUTE; + +typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE +{ + UCHAR ucAttribute; //Same as other digital encoder attributes defined above + UCHAR ucPadding[1]; +}ATOM_ENCODER_DIGITAL_ATTRIBUTE; + +typedef union _ATOM_ENCODER_ATTRIBUTE +{ + ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; + ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; +}ATOM_ENCODER_ATTRIBUTE; + + +typedef struct _DVO_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; + USHORT usEncoderID; + UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only. + UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT + ATOM_ENCODER_ATTRIBUTE usDevAttr; +}DVO_ENCODER_CONTROL_PARAMETERS; + +typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION +{ + DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion +}DVO_ENCODER_CONTROL_PS_ALLOCATION; + + +#define ATOM_XTMDS_ASIC_SI164_ID 1 +#define ATOM_XTMDS_ASIC_SI178_ID 2 +#define ATOM_XTMDS_ASIC_TFP513_ID 3 +#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001 +#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 +#define ATOM_XTMDS_MVPU_FPGA 0x00000004 + + +typedef struct _ATOM_XTMDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usSingleLinkMaxFrequency; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip + UCHAR ucXtransimitterID; + UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported + UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program seqence alters + // due to design. This ID is used to alert driver that the sequence is not "standard"! + UCHAR ucMasterAddress; // Address to control Master xTMDS Chip + UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip +}ATOM_XTMDS_INFO; + +typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS +{ + UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off + UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX.... 
+  UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                      0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                   0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                   0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT             0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH         0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN              0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN           0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN           0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                  0x00000080L  //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program
+
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN       0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN          0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN               0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                  0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE      0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE  0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE             0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE              0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                  0x00010000L
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                  0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE                0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE               0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK            0x00300000L  //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT           20
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                  0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2       0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4       0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN             0x02000000L  //When set, Dynamic
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN         0x04000000L  //When set, Dynamic
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN               0x08000000L  //When set, this mode is for accelerated 3D mode
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK    0x70000000L  //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT   28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                 0x80000000L
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE             0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT           0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN            0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO             0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE               0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN        0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE          0x00000040L  //If this bit is set in multi-pp mode, then the driver will pick the one with the lowest power consumption.
+                                                                       //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                 0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                 0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE                0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+  ULONG   ulMiscInfo;      //The power level should be arranged in ascending order
+  ULONG   ulReserved1;     // must set to 0
+  ULONG   ulReserved2;     // must set to 0
+  USHORT  usEngineClock;
+  USHORT  usMemoryClock;
+  UCHAR   ucVoltageDropIndex; // index to GPIO table
+  UCHAR   ucSelectedPanel_RefreshRate; // panel refresh rate
+  UCHAR   ucMinTemperature;
+  UCHAR   ucMaxTemperature;
+  UCHAR   ucNumPciELanes;  // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+  ULONG   ulMiscInfo;      //The power level should be arranged in ascending order
+  ULONG   ulMiscInfo2;
+  ULONG   ulEngineClock;
+  ULONG   ulMemoryClock;
+  UCHAR   ucVoltageDropIndex; // index to GPIO table
+  UCHAR   ucSelectedPanel_RefreshRate; // panel refresh rate
+  UCHAR   ucMinTemperature;
+  UCHAR   ucMaxTemperature;
+  UCHAR   ucNumPciELanes;  // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+  ULONG   ulMiscInfo;      //The power level should be arranged in ascending order
+  ULONG   ulMiscInfo2;
+  ULONG   ulEngineClock;
+  ULONG   ulMemoryClock;
+  UCHAR   ucVoltageDropIndex; // index to Core (VDDC) voltage table
+  UCHAR   ucSelectedPanel_RefreshRate; // panel refresh rate
+  UCHAR   ucMinTemperature;
+  UCHAR   ucMaxTemperature;
+  UCHAR   ucNumPciELanes;  // number of PCIE lanes
+  UCHAR   ucVDDCI_VoltageDropIndex; // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN             0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE          0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63       0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032    0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030    0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649    0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64       0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375     0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512    0x07  // Andigilog
+
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR  ucOverdriveThermalController;
+  UCHAR  ucOverdriveI2cLine;
+  UCHAR  ucOverdriveIntBitmap;
+  UCHAR  ucOverdriveControllerAddress;
+  UCHAR  ucSizeOfPowerModeEntry;
+  UCHAR  ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR  ucOverdriveThermalController;
+  UCHAR  ucOverdriveI2cLine;
+  UCHAR  ucOverdriveIntBitmap;
+  UCHAR  ucOverdriveControllerAddress;
+  UCHAR  ucSizeOfPowerModeEntry;
+  UCHAR  ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR  ucOverdriveThermalController;
+  UCHAR  ucOverdriveI2cLine;
+  UCHAR  ucOverdriveIntBitmap;
+  UCHAR  ucOverdriveControllerAddress;
+  UCHAR  ucSizeOfPowerModeEntry;
+  UCHAR  ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+
+
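Because ucSizeOfPowerModeEntry is recorded in the table itself and the entry layout differs between the V1/V2/V3 revisions above, a parser should stride through asPowerPlayInfo by that byte count rather than indexing the fixed-size array directly. A minimal sketch under that assumption; the helper name atom_powerplay_entry is illustrative and not part of this header:

#include <stddef.h>

/* Illustrative sketch only: return the idx-th power mode entry of a legacy
 * powerplay table, honoring the entry size stored in the table header
 * instead of sizeof(ATOM_POWERMODE_INFO), since the per-entry layout is
 * revision dependent. */
static const ATOM_POWERMODE_INFO *
atom_powerplay_entry(const ATOM_POWERPLAY_INFO *tbl, UCHAR idx)
{
	const UCHAR *base = (const UCHAR *)tbl->asPowerPlayInfo;

	if (idx >= tbl->ucNumOfPowerModeEntries ||
	    idx >= ATOM_MAX_NUMBEROF_POWER_BLOCK)
		return NULL;  /* out of range */

	return (const ATOM_POWERMODE_INFO *)
		(base + (size_t)idx * tbl->ucSizeOfPowerModeEntry);
}

/* The settings group of an entry then decodes as
 *   (entry->ulMiscInfo & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK)
 *       >> ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT
 * yielding 1 (Optimal Battery Life) through 5 (Optimal Performance). */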
+/**************************************************************************/ + + +// Following definitions are for compatiblity issue in different SW components. +#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 +#define Object_Info Object_Header +#define AdjustARB_SEQ MC_InitParameter +#define VRAM_GPIO_DetectionInfo VoltageObjectInfo +#define ASIC_VDDCI_Info ASIC_ProfilingInfo +#define ASIC_MVDDQ_Info MemoryTrainingInfo +#define SS_Info PPLL_SS_Info +#define ASIC_MVDDC_Info ASIC_InternalSS_Info +#define DispDevicePriorityInfo SaveRestoreInfo +#define DispOutInfo TV_VideoMode + + +#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE +#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE + +//New device naming, remove them when both DAL/VBIOS is ready +#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS +#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS + +#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS +#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS + +#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS +#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION + +#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT +#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT + +#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX +#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX + +#define ATOM_DEVICE_DFP2I_INDEX 0x00000009 +#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) + +#define ATOM_S0_DFP1I ATOM_S0_DFP1 +#define ATOM_S0_DFP1X ATOM_S0_DFP2 + +#define ATOM_S0_DFP2I 0x00200000L +#define ATOM_S0_DFP2Ib2 0x20 + +#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE +#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE + +#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L +#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02 + +#define ATOM_S3_DFP2I_ACTIVEb1 0x02 + +#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE +#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE + +#define ATOM_S3_DFP2I_ACTIVE 0x00000200L + +#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE +#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE +#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L + + +#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02 +#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02 + +#define ATOM_S5_DOS_REQ_DFP2I 0x0200 +#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1 +#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2 + +#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 +#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L + +#define TMDS1XEncoderControl DVOEncoderControl +#define DFP1XOutputControl DVOOutputControl + +#define ExternalDFPOutputControl DFP1XOutputControl +#define EnableExternalTMDS_Encoder TMDS1XEncoderControl + +#define DFP1IOutputControl TMDSAOutputControl +#define DFP2IOutputControl LVTMAOutputControl + +#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS +#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION + +#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS +#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION + +#define ucDac1Standard ucDacStandard +#define ucDac2Standard ucDacStandard + +#define TMDS1EncoderControl TMDSAEncoderControl +#define TMDS2EncoderControl LVTMAEncoderControl + +#define DFP1OutputControl TMDSAOutputControl +#define DFP2OutputControl LVTMAOutputControl +#define CRT1OutputControl DAC1OutputControl +#define CRT2OutputControl DAC2OutputControl + +//These 
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
+#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
+
+/*********************************************************************************/
+
+#pragma pack() // BIOS data must use byte alignment
+
+#pragma pack(1)
+
+typedef struct _ATOM_HOLE_INFO
+{
+  USHORT usOffset; // offset of the hole ( from the start of the binary )
+  USHORT usLength; // length of the hole ( in bytes )
+}ATOM_HOLE_INFO;
+
+typedef struct _ATOM_SERVICE_DESCRIPTION
+{
+  UCHAR ucRevision;      // Holes set revision
+  UCHAR ucAlgorithm;     // Hash algorithm
+  UCHAR ucSignatureType; // Signature type ( 0 - no signature, 1 - test, 2 - production )
+  UCHAR ucReserved;
+  USHORT usSigOffset;    // Signature offset ( from the start of the binary )
+  USHORT usSigLength;    // Signature length
+}ATOM_SERVICE_DESCRIPTION;
+
+
+typedef struct _ATOM_SERVICE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  ATOM_SERVICE_DESCRIPTION asDescr;
+  UCHAR ucholesNo;        // number of holes that follow
+  ATOM_HOLE_INFO holes[1]; // array of hole descriptions
+}ATOM_SERVICE_INFO;
+
+
+
+#pragma pack() // BIOS data must use byte alignment
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;   //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8]; //UINT64 OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32 Signature;       //0x0
+  UINT32 Length;          //0x4
+  UINT8 Revision;         //0x8
+  UINT8 Checksum;         //0x9
+  UINT8 OemId[6];         //0xA
+  UINT64 OemTableId;      //0x10
+  UINT32 OemRevision;     //0x18
+  UINT32 CreatorId;       //0x1C
+  UINT32 CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+ ULONG Reserved[4]; //0x3C +}UEFI_ACPI_VFCT; + +typedef struct { + ULONG PCIBus; //0x4C + ULONG PCIDevice; //0x50 + ULONG PCIFunction; //0x54 + USHORT VendorID; //0x58 + USHORT DeviceID; //0x5A + USHORT SSVID; //0x5C + USHORT SSID; //0x5E + ULONG Revision; //0x60 + ULONG ImageLength; //0x64 +}VFCT_IMAGE_HEADER; + + +typedef struct { + VFCT_IMAGE_HEADER VbiosHeader; + UCHAR VbiosContent[1]; +}GOP_VBIOS_CONTENT; + +typedef struct { + VFCT_IMAGE_HEADER Lib1Header; + UCHAR Lib1Content[1]; +}GOP_LIB1_CONTENT; + +#pragma pack() + + +#endif /* _ATOMBIOS_H */ + +#include "pptable.h" + diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h new file mode 100644 index 000000000..992dcd8a5 --- /dev/null +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -0,0 +1,624 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _CGS_COMMON_H +#define _CGS_COMMON_H + +#include "amd_shared.h" + +/** + * enum cgs_gpu_mem_type - GPU memory types + */ +enum cgs_gpu_mem_type { + CGS_GPU_MEM_TYPE__VISIBLE_FB, + CGS_GPU_MEM_TYPE__INVISIBLE_FB, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB, + CGS_GPU_MEM_TYPE__GART_CACHEABLE, + CGS_GPU_MEM_TYPE__GART_WRITECOMBINE +}; + +/** + * enum cgs_ind_reg - Indirect register spaces + */ +enum cgs_ind_reg { + CGS_IND_REG__MMIO, + CGS_IND_REG__PCIE, + CGS_IND_REG__SMC, + CGS_IND_REG__UVD_CTX, + CGS_IND_REG__DIDT, + CGS_IND_REG__AUDIO_ENDPT +}; + +/** + * enum cgs_clock - Clocks controlled by the SMU + */ +enum cgs_clock { + CGS_CLOCK__SCLK, + CGS_CLOCK__MCLK, + CGS_CLOCK__VCLK, + CGS_CLOCK__DCLK, + CGS_CLOCK__ECLK, + CGS_CLOCK__ACLK, + CGS_CLOCK__ICLK, + /* ... */ +}; + +/** + * enum cgs_engine - Engines that can be statically power-gated + */ +enum cgs_engine { + CGS_ENGINE__UVD, + CGS_ENGINE__VCE, + CGS_ENGINE__VP8, + CGS_ENGINE__ACP_DMA, + CGS_ENGINE__ACP_DSP0, + CGS_ENGINE__ACP_DSP1, + CGS_ENGINE__ISP, + /* ... */ +}; + +/** + * enum cgs_voltage_planes - Voltage planes for external camera HW + */ +enum cgs_voltage_planes { + CGS_VOLTAGE_PLANE__SENSOR0, + CGS_VOLTAGE_PLANE__SENSOR1, + /* ... 
*/
+};
+
+/*
+ * enum cgs_ucode_id - Firmware types for different IPs
+ */
+enum cgs_ucode_id {
+	CGS_UCODE_ID_SMU = 0,
+	CGS_UCODE_ID_SDMA0,
+	CGS_UCODE_ID_SDMA1,
+	CGS_UCODE_ID_CP_CE,
+	CGS_UCODE_ID_CP_PFP,
+	CGS_UCODE_ID_CP_ME,
+	CGS_UCODE_ID_CP_MEC,
+	CGS_UCODE_ID_CP_MEC_JT1,
+	CGS_UCODE_ID_CP_MEC_JT2,
+	CGS_UCODE_ID_GMCON_RENG,
+	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_MAXIMUM,
+};
+
+/**
+ * struct cgs_clock_limits - Clock limits
+ *
+ * Clocks are specified in 10KHz units.
+ */
+struct cgs_clock_limits {
+	unsigned min;         /**< Minimum supported frequency */
+	unsigned max;         /**< Maximum supported frequency */
+	unsigned sustainable; /**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+	uint16_t version;
+	uint16_t feature_version;
+	uint32_t image_size;
+	uint64_t mc_addr;
+	void *kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @mc_start: Start MC address of the heap (output)
+ * @mc_size: MC address space size (output)
+ * @mem_size: maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				  uint64_t *mc_start, uint64_t *mc_size,
+				  uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device: opaque device handle
+ * @kmem: pointer to kernel memory
+ * @size: size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr: MC address (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+			       uint64_t min_offset, uint64_t max_offset,
+			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device: opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device: opaque device handle
+ * @type: memory type
+ * @size: size in bytes
+ * @align: alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle: memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
+ * - others: undefined, don't use with max_offset
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t size, uint64_t align,
+				   uint64_t min_offset, uint64_t max_offset,
+				   cgs_handle_t *handle);
+
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @mcaddr: MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ *
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ * @map: Kernel virtual address the memory was mapped to (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device: opaque device handle
+ * @handle: memory handle returned by alloc or import
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device: opaque device handle
+ * @offset: register offset
+ * @value: register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+				     uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ *
+ * Return: register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					    unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device: opaque device handle
+ * @space: indirect register space
+ * @index: register index within the space
+ * @value: register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					 unsigned index, uint32_t value);
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ *
+ * Return: Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ *
+ * Return: Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ *
+ * Return: Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+						unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+					    uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be word-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+					    uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device: opaque device handle
+ * @addr: address, must be dword-aligned
+ * @value: value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+					     uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device: opaque device handle
+ * @table: data table index
+ * @size: size of the table (output, may be NULL)
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+	void *cgs_device, unsigned table,
+	uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device: opaque device handle
+ * @table: data table index
+ * @frev: table format revision (output, may be NULL)
+ * @crev: table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+					     uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table: command table index
+ * @args: arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+					 unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request (output)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device: opaque device handle
+ * @request: handle of created PM request
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @active: 0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account as the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+				    int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @clock: which clock?
+ * @freq: requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+				      enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device: opaque device handle
+ * @request: PM request handle
+ * @engine: which engine?
+ * @powered: 0 = powered down, non-0 = powered up
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+				       enum cgs_engine engine, int powered);
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device: opaque device handle
+ * @clock: which clock?
+ * @limits: clock limits
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+					   enum cgs_clock clock,
+					   struct cgs_clock_limits *limits);
+
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device: opaque device handle
+ * @mask: bitmask of voltages to change (1 << voltage_plane)
+ * @voltages: pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+					 const uint32_t *voltages);
+
+/**
+ * cgs_get_firmware_info() - Get the firmware information from core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info)(void *cgs_device,
+				     enum cgs_ucode_id type,
+				     struct cgs_firmware_info *info);
+
+typedef int (*cgs_set_powergating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_powergating_state state);
+
+typedef int (*cgs_set_clockgating_state)(void *cgs_device,
+					 enum amd_ip_block_type block_type,
+					 enum amd_clockgating_state state);
+
+struct cgs_ops {
+	/* memory management calls (similar to KFD interface) */
+	cgs_gpu_mem_info_t gpu_mem_info;
+	cgs_gmap_kmem_t gmap_kmem;
+	cgs_gunmap_kmem_t gunmap_kmem;
+	cgs_alloc_gpu_mem_t alloc_gpu_mem;
+	cgs_free_gpu_mem_t free_gpu_mem;
+	cgs_gmap_gpu_mem_t gmap_gpu_mem;
+	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+	cgs_kmap_gpu_mem_t kmap_gpu_mem;
+	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+	/* MMIO access */
+	cgs_read_register_t read_register;
+	cgs_write_register_t write_register;
+	cgs_read_ind_register_t read_ind_register;
+	cgs_write_ind_register_t write_ind_register;
+	/* PCI configuration space access */
+	cgs_read_pci_config_byte_t read_pci_config_byte;
+	cgs_read_pci_config_word_t read_pci_config_word;
+	cgs_read_pci_config_dword_t read_pci_config_dword;
+	cgs_write_pci_config_byte_t write_pci_config_byte;
+	cgs_write_pci_config_word_t write_pci_config_word;
+	cgs_write_pci_config_dword_t write_pci_config_dword;
+	/* ATOM BIOS */
+	cgs_atom_get_data_table_t atom_get_data_table;
+	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+	/* Power management */
+	cgs_create_pm_request_t create_pm_request;
+	cgs_destroy_pm_request_t destroy_pm_request;
+	cgs_set_pm_request_t set_pm_request;
+	cgs_pm_request_clock_t pm_request_clock;
+	cgs_pm_request_engine_t pm_request_engine;
+	cgs_pm_query_clock_limits_t pm_query_clock_limits;
+	cgs_set_camera_voltages_t set_camera_voltages;
+	/* Firmware Info */
+	cgs_get_firmware_info get_firmware_info;
+	/* cg pg interface */
+	cgs_set_powergating_state set_powergating_state;
+	cgs_set_clockgating_state set_clockgating_state;
+};
+
+struct cgs_os_ops; /* To be defined in OS-specific CGS header */
+
+struct cgs_device {
+	const struct cgs_ops *ops;
+	const struct cgs_os_ops *os_ops;
+	/* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size) \
+	CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr) \
+	CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle) \
+	CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle) \
+	CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
+	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle) \
+	CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map) \
+	CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle) \
+	CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset) \
+	CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value) \
+	CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index) \
+	CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value) \
+	CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr) \
+	CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr) \
+	CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr) \
+	CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value) \
+	CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value) \
+	CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value) \
+	CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
CGS_CALL(atom_get_data_table,dev,table,size,frev,crev) +#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \ + CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev) +#define cgs_atom_exec_cmd_table(dev,table,args) \ + CGS_CALL(atom_exec_cmd_table,dev,table,args) + +#define cgs_create_pm_request(dev,request) \ + CGS_CALL(create_pm_request,dev,request) +#define cgs_destroy_pm_request(dev,request) \ + CGS_CALL(destroy_pm_request,dev,request) +#define cgs_set_pm_request(dev,request,active) \ + CGS_CALL(set_pm_request,dev,request,active) +#define cgs_pm_request_clock(dev,request,clock,freq) \ + CGS_CALL(pm_request_clock,dev,request,clock,freq) +#define cgs_pm_request_engine(dev,request,engine,powered) \ + CGS_CALL(pm_request_engine,dev,request,engine,powered) +#define cgs_pm_query_clock_limits(dev,clock,limits) \ + CGS_CALL(pm_query_clock_limits,dev,clock,limits) +#define cgs_set_camera_voltages(dev,mask,voltages) \ + CGS_CALL(set_camera_voltages,dev,mask,voltages) +#define cgs_get_firmware_info(dev, type, info) \ + CGS_CALL(get_firmware_info, dev, type, info) +#define cgs_set_powergating_state(dev, block_type, state) \ + CGS_CALL(set_powergating_state, dev, block_type, state) +#define cgs_set_clockgating_state(dev, block_type, state) \ + CGS_CALL(set_clockgating_state, dev, block_type, state) + +#endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h new file mode 100644 index 000000000..3b47ae313 --- /dev/null +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -0,0 +1,118 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _CGS_LINUX_H +#define _CGS_LINUX_H + +#include "cgs_common.h" + +/** + * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources + * @private_data: private data provided to cgs_add_irq_source + * @src_id: interrupt source ID + * @type: interrupt type + * @enabled: 0 = disable source, non-0 = enable source + * + * Return: 0 on success, -errno otherwise + */ +typedef int (*cgs_irq_source_set_func_t)(void *private_data, + unsigned src_id, unsigned type, + int enabled); + +/** + * cgs_irq_handler_func() - Interrupt handler callback + * @private_data: private data provided to cgs_add_irq_source + * @src_id: interrupt source ID + * @iv_entry: pointer to raw ih ring entry + * + * This callback runs in interrupt context. 
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_handler_func_t)(void *private_data,
+				      unsigned src_id, const uint32_t *iv_entry);
+
+/**
+ * cgs_add_irq_source() - Add an IRQ source
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @num_types: number of interrupt types that can be independently enabled
+ * @set: callback function to enable/disable an interrupt type
+ * @handler: interrupt handler callback
+ * @private_data: private data to pass to callback functions
+ *
+ * The same IRQ source can be added only once. Adding an IRQ source
+ * indicates ownership of that IRQ source and all its IRQ types.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+				    unsigned num_types,
+				    cgs_irq_source_set_func_t set,
+				    cgs_irq_handler_func_t handler,
+				    void *private_data);
+
+/**
+ * cgs_irq_get() - Request enabling an IRQ source and type
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @type: interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+/**
+ * cgs_irq_put() - Indicate IRQ source is no longer needed
+ * @cgs_device: opaque device handle
+ * @src_id: interrupt source ID
+ * @type: interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources. Even after cgs_irq_put is called, the
+ * IRQ handler may still be called if there are more references to
+ * the IRQ source.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+struct cgs_os_ops {
+	/* IRQ handling */
+	cgs_add_irq_source_t add_irq_source;
+	cgs_irq_get_t irq_get;
+	cgs_irq_put_t irq_put;
+};
+
+#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
+	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
+		    private_data)
+#define cgs_irq_get(dev,src_id,type) \
+	CGS_OS_CALL(irq_get,dev,src_id,type)
+#define cgs_irq_put(dev,src_id,type) \
+	CGS_OS_CALL(irq_put,dev,src_id,type)
+
+#endif /* _CGS_LINUX_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa11..888250b33 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@ enum kgd_engine_type {
 	KGD_ENGINE_MEC1,
 	KGD_ENGINE_MEC2,
 	KGD_ENGINE_RLC,
-	KGD_ENGINE_SDMA,
+	KGD_ENGINE_SDMA1,
+	KGD_ENGINE_SDMA2,
 	KGD_ENGINE_MAX
 };
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
new file mode 100644
index 000000000..ee6978b30
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PPTABLE_H +#define _PPTABLE_H + +#pragma pack(1) + +typedef struct _ATOM_PPLIB_THERMALCONTROLLER + +{ + UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* + UCHAR ucI2cLine; // as interpreted by DAL I2C + UCHAR ucI2cAddress; + UCHAR ucFanParameters; // Fan Control Parameters. + UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. + UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. + UCHAR ucReserved; // ---- + UCHAR ucFlags; // to be defined +} ATOM_PPLIB_THERMALCONTROLLER; + +#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. + +#define ATOM_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_LM64 5 +#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 +#define ATOM_PP_THERMALCONTROLLER_RV770 8 +#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 +#define ATOM_PP_THERMALCONTROLLER_KONG 10 +#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 +#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 +#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen. +#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally +#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15 +#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16 +#define ATOM_PP_THERMALCONTROLLER_LM96163 17 +#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18 +#define ATOM_PP_THERMALCONTROLLER_KAVERI 19 + + +// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal. +// We probably should reserve the bit 0x80 for this use. +// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here). +// The driver can pick the correct internal controller based on the ASIC. 
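Concretely, splitting a combo type back apart is a pair of bit operations. An editorial sketch, not part of the patch; pp_thermal_is_combo and pp_thermal_external_id are hypothetical helper names, and the two combo encodings appear in the defines just below:

/* Split a combo ucType per the 0x80 convention described above. */
static int pp_thermal_is_combo(UCHAR uc_type)
{
	return (uc_type & 0x80) != 0;
}

static UCHAR pp_thermal_external_id(UCHAR uc_type)
{
	return uc_type & 0x7F; /* e.g. 0x89 -> 0x09 (ADT7473) */
}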
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR  ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
+    UCHAR  ucTHyst;          // Temperature hysteresis. Integer.
+    USHORT usTMin;           // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+    USHORT usTMed;           // The middle temperature where we change slopes.
+    USHORT usTHigh;          // The high point above TMed for adjusting the second slope.
+    USHORT usPWMMin;         // The minimum PWM value in percent (0.01% increments).
+    USHORT usPWMMed;         // The PWM value (in percent) at TMed.
+    USHORT usPWMHigh;        // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+    ATOM_PPLIB_FANTABLE2 basicTable2;
+    UCHAR  ucFanControlMode;
+    USHORT usFanPWMMax;
+    USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT usSize;
+    ULONG  ulMaxEngineClock; // For Overdrive.
+    ULONG  ulMaxMemoryClock; // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT usVCETableOffset;  //points to ATOM_PPLIB_VCE_Table
+    USHORT usUVDTableOffset;  //points to ATOM_PPLIB_UVD_Table
+    USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
+    USHORT usPPMTableOffset;  //points to ATOM_PPLIB_PPM_Table
+    USHORT usACPTableOffset;  //points to ATOM_PPLIB_ACP_Table
+    /* points to ATOM_PPLIB_POWERTUNE_Table */
+    USHORT usPowerTuneTableOffset;
+    /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */
+    USHORT usSclkVddgfxTableOffset;
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE 0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+    ATOM_COMMON_TABLE_HEADER sHeader;
+
+    UCHAR ucDataRevision;
+
+    UCHAR ucNumStates;
+    UCHAR ucStateEntrySize;
+    UCHAR ucClockInfoSize;
+    UCHAR ucNonClockSize;
+
+    // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+    USHORT usStateArrayOffset;
+
+    // offset from start of this table to array of ASIC-specific structures,
+    // currently ATOM_PPLIB_CLOCK_INFO.
+    USHORT usClockInfoArrayOffset;
+
+    // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+    USHORT usNonClockInfoArrayOffset;
+
+    USHORT usBackbiasTime; // in microseconds
+    USHORT usVoltageTime;  // in microseconds
+    USHORT usTableSize;    //the size of this structure, or the extended structure
+
+    ULONG ulPlatformCaps;  // See ATOM_PPLIB_CAPS_*
+
+    ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+    USHORT usBootClockInfoOffset;
+    USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR  ucNumCustomThermalPolicy;
+    USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset; + USHORT usExtendendedHeaderOffset; +} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 +{ + ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; + ULONG ulGoldenPPID; // PPGen use only + ULONG ulGoldenRevision; // PPGen use only + USHORT usVddcDependencyOnSCLKOffset; + USHORT usVddciDependencyOnMCLKOffset; + USHORT usVddcDependencyOnMCLKOffset; + USHORT usMaxClockVoltageOnDCOffset; + USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table + USHORT usMvddDependencyOnMCLKOffset; +} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 +{ + ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; + ULONG ulTDPLimit; + ULONG ulNearTDPLimit; + ULONG ulSQRampingThreshold; + USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table + ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table + USHORT usTDPODLimit; + USHORT usLoadLineSlope; // in milliOhms * 100 +} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; + +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +// 2, 4, 6, 7 are reserved + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 +#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 +#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 +#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 +#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 +#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 +#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 + +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 +#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 +#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D) + +//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings +#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 +#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 + +// 0 is 2.5Gb/s, 1 is 5Gb/s +#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 +#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 + +// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec +#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8 +#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 + +// lookup into reduced refresh-rate table +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 + +#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 +// 2-15 TBD as needed. 
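The masks and shifts above pack a state's PCIe link parameters and refresh-rate lookup into ulCapsAndSettings. An editorial sketch of decoding the PCIe fields, not part of the patch; pp_decode_pcie_caps is a hypothetical helper name:

/* Decode the PCIe fields of ulCapsAndSettings. */
static void pp_decode_pcie_caps(ULONG caps, unsigned *lanes, unsigned *gen)
{
	/* the width field stores (lanes - 1), per the comment above */
	*lanes = ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
		  ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	/* speed bit: 0 = 2.5 Gb/s (Gen1), 1 = 5 Gb/s (Gen2) */
	*gen = ((caps & ATOM_PPLIB_PCIE_LINK_SPEED_MASK) >>
		ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT) + 1;
}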
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
+
+//M3 Arb //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR ucMinTemperature;
+    UCHAR ucMaxTemperature;
+    UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+    USHORT usClassification;
+    UCHAR  ucMinTemperature;
+    UCHAR  ucMaxTemperature;
+    ULONG  ulCapsAndSettings;
+    UCHAR  ucRequiredPower;
+    USHORT usClassification2;
+    ULONG  ulVCLK;
+    ULONG  ulDCLK;
+    UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+    USHORT usEngineClockLow;
+    UCHAR  ucEngineClockHigh;
+
+    USHORT usMemoryClockLow;
+    UCHAR  ucMemoryClockHigh;
+
+    USHORT usVDDC;
+    USHORT usUnused1;
+    USHORT usUnused2;
+
+    ULONG  ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+    USHORT usLowEngineClockLow;  // Low Engine clock in MHz (the same way as on the R600).
+    UCHAR  ucLowEngineClockHigh;
+    USHORT usHighEngineClockLow; // High Engine clock in MHz.
+    UCHAR  ucHighEngineClockHigh;
+    USHORT usMemoryClockLow;     // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+    UCHAR  ucMemoryClockHigh;    // Currently unused.
+    UCHAR  ucPadding;            // For proper alignment and size.
+    USHORT usVDDC;               // For the 780, use: None, Low, High, Variable
+    UCHAR  ucMaxHTLinkWidth;     // From SBIOS - {2, 4, 8, 16}
+    UCHAR  ucMinHTLinkWidth;     // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
+    USHORT usHTLinkFreq;         // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+    ULONG  ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+    USHORT usEngineClockLow;
+    UCHAR  ucEngineClockHigh;
+
+    USHORT usMemoryClockLow;
+    UCHAR  ucMemoryClockHigh;
+
+    USHORT usVDDC;
+    USHORT usVDDCI;
+    USHORT usUnused;
+
+    ULONG  ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+    USHORT usEngineClockLow;
+    UCHAR  ucEngineClockHigh;
+
+    USHORT usMemoryClockLow;
+    UCHAR  ucMemoryClockHigh;
+
+    USHORT usVDDC;
+    USHORT usVDDCI;
+    UCHAR  ucPCIEGen;
+    UCHAR  ucUnused1;
+
+    ULONG  ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+    USHORT usEngineClockLow;
+    UCHAR  ucEngineClockHigh;
+
+    USHORT usMemoryClockLow;
+    UCHAR  ucMemoryClockHigh;
+
+    UCHAR  ucPCIEGen;
+    USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+    USHORT usEngineClockLow;  //clockfrequency & 0xFFFF. The unit is in 10khz
+    UCHAR  ucEngineClockHigh; //clockfrequency >> 16.
+    UCHAR  vddcIndex;         //2-bit vddc index;
+    USHORT tdpLimit;
+    //please initialize to 0
+    USHORT rsv1;
+    //please initialize to 0s
+    ULONG  rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO {
+    UCHAR index;
+    UCHAR rsv[3];
+} ATOM_PPLIB_CZ_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+    //number of valid dpm levels in this state; Driver uses it to calculate the whole
+    //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+    UCHAR ucNumDPMLevels;
+
+    //an index to the array of nonClockInfos
+    UCHAR nonClockInfoIndex;
+    /**
+     * Driver will read the first ucNumDPMLevels in this array
+     */
+    UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+    //how many states we have
+    UCHAR ucNumEntries;
+
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+
+    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+    UCHAR ucEntrySize;
+
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+    //how many non-clock levels we have. normally should be same as number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries; // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries; // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+    struct
+    {
+        USHORT usVddc;         // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd.
+        ULONG  ulLeakageValue; // For CI and newer we use this as the "fake" standardized VDDC value. In CI we read it as StdVoltageLoSidd.
+
+    };
+    struct
+    {
+        USHORT usVddc1;
+        USHORT usVddc2;
+        USHORT usVddc3;
+    };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries; // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries; // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR ucVCEClockInfoIndex;
+    UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits are an index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+    UCHAR revid;
+//  VCEClockInfoArray array;
+//  ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//  ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+    UCHAR revid;
+//  UVDClockInfoArray array;
+//  ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    USHORT usSAMClockLow;
+    UCHAR  ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+    UCHAR revid;
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table; + +typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record +{ + USHORT usVoltage; + USHORT usACPClockLow; + UCHAR ucACPClockHigh; +}ATOM_PPLIB_ACPClk_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{ + UCHAR numEntries; + ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; +}ATOM_PPLIB_ACPClk_Voltage_Limit_Table; + +typedef struct _ATOM_PPLIB_ACP_Table +{ + UCHAR revid; + ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits; +}ATOM_PPLIB_ACP_Table; + +typedef struct _ATOM_PowerTune_Table{ + USHORT usTDP; + USHORT usConfigurableTDP; + USHORT usTDC; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usLowCACLeakage; + USHORT usHighCACLeakage; +}ATOM_PowerTune_Table; + +typedef struct _ATOM_PPLIB_POWERTUNE_Table +{ + UCHAR revid; + ATOM_PowerTune_Table power_tune_table; +}ATOM_PPLIB_POWERTUNE_Table; + +typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1 +{ + UCHAR revid; + ATOM_PowerTune_Table power_tune_table; + USHORT usMaximumPowerDeliveryLimit; + USHORT usTjMax; + USHORT usReserve[6]; +} ATOM_PPLIB_POWERTUNE_Table_V1; + +#define ATOM_PPM_A_A 1 +#define ATOM_PPM_A_I 2 +typedef struct _ATOM_PPLIB_PPM_Table +{ + UCHAR ucRevId; + UCHAR ucPpmDesign; //A+I or A+A + USHORT usCpuCoreNumber; + ULONG ulPlatformTDP; + ULONG ulSmallACPlatformTDP; + ULONG ulPlatformTDC; + ULONG ulSmallACPlatformTDC; + ULONG ulApuTDP; + ULONG ulDGpuTDP; + ULONG ulDGpuUlvPower; + ULONG ulTjmax; +} ATOM_PPLIB_PPM_Table; + +#pragma pack() + +#endif diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h new file mode 100644 index 000000000..65cfacd7a --- /dev/null +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -0,0 +1,417 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VI_STRUCTS_H_ +#define VI_STRUCTS_H_ + +struct vi_sdma_mqd { + uint32_t sdmax_rlcx_rb_cntl; + uint32_t sdmax_rlcx_rb_base; + uint32_t sdmax_rlcx_rb_base_hi; + uint32_t sdmax_rlcx_rb_rptr; + uint32_t sdmax_rlcx_rb_wptr; + uint32_t sdmax_rlcx_rb_wptr_poll_cntl; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo; + uint32_t sdmax_rlcx_rb_rptr_addr_hi; + uint32_t sdmax_rlcx_rb_rptr_addr_lo; + uint32_t sdmax_rlcx_ib_cntl; + uint32_t sdmax_rlcx_ib_rptr; + uint32_t sdmax_rlcx_ib_offset; + uint32_t sdmax_rlcx_ib_base_lo; + uint32_t sdmax_rlcx_ib_base_hi; + uint32_t sdmax_rlcx_ib_size; + uint32_t sdmax_rlcx_skip_cntl; + uint32_t sdmax_rlcx_context_status; + uint32_t sdmax_rlcx_doorbell; + uint32_t sdmax_rlcx_virtual_addr; + uint32_t sdmax_rlcx_ape1_cntl; + uint32_t sdmax_rlcx_doorbell_log; + uint32_t reserved_22; + uint32_t reserved_23; + uint32_t reserved_24; + uint32_t reserved_25; + uint32_t reserved_26; + uint32_t reserved_27; + uint32_t reserved_28; + uint32_t reserved_29; + uint32_t reserved_30; + uint32_t reserved_31; + uint32_t reserved_32; + uint32_t reserved_33; + uint32_t reserved_34; + uint32_t reserved_35; + uint32_t reserved_36; + uint32_t reserved_37; + uint32_t reserved_38; + uint32_t reserved_39; + uint32_t reserved_40; + uint32_t reserved_41; + uint32_t reserved_42; + uint32_t reserved_43; + uint32_t reserved_44; + uint32_t reserved_45; + uint32_t reserved_46; + uint32_t reserved_47; + uint32_t reserved_48; + uint32_t reserved_49; + uint32_t reserved_50; + uint32_t reserved_51; + uint32_t reserved_52; + uint32_t reserved_53; + uint32_t reserved_54; + uint32_t reserved_55; + uint32_t reserved_56; + uint32_t reserved_57; + uint32_t reserved_58; + uint32_t reserved_59; + uint32_t reserved_60; + uint32_t reserved_61; + uint32_t reserved_62; + uint32_t reserved_63; + uint32_t reserved_64; + uint32_t reserved_65; + uint32_t reserved_66; + uint32_t reserved_67; + uint32_t reserved_68; + uint32_t reserved_69; + uint32_t reserved_70; + uint32_t reserved_71; + uint32_t reserved_72; + uint32_t reserved_73; + uint32_t reserved_74; + uint32_t reserved_75; + uint32_t reserved_76; + uint32_t reserved_77; + uint32_t reserved_78; + uint32_t reserved_79; + uint32_t reserved_80; + uint32_t reserved_81; + uint32_t reserved_82; + uint32_t reserved_83; + uint32_t reserved_84; + uint32_t reserved_85; + uint32_t reserved_86; + uint32_t reserved_87; + uint32_t reserved_88; + uint32_t reserved_89; + uint32_t reserved_90; + uint32_t reserved_91; + uint32_t reserved_92; + uint32_t reserved_93; + uint32_t reserved_94; + uint32_t reserved_95; + uint32_t reserved_96; + uint32_t reserved_97; + uint32_t reserved_98; + uint32_t reserved_99; + uint32_t reserved_100; + uint32_t reserved_101; + uint32_t reserved_102; + uint32_t reserved_103; + uint32_t reserved_104; + uint32_t reserved_105; + uint32_t reserved_106; + uint32_t reserved_107; + uint32_t reserved_108; + uint32_t reserved_109; + uint32_t reserved_110; + uint32_t reserved_111; + uint32_t reserved_112; + uint32_t reserved_113; + uint32_t reserved_114; + uint32_t reserved_115; + uint32_t reserved_116; + uint32_t reserved_117; + uint32_t reserved_118; + uint32_t reserved_119; + uint32_t reserved_120; + uint32_t reserved_121; + uint32_t reserved_122; + uint32_t reserved_123; + uint32_t reserved_124; + uint32_t reserved_125; + uint32_t reserved_126; + uint32_t reserved_127; +}; + +struct vi_mqd { + uint32_t header; + uint32_t compute_dispatch_initiator; + uint32_t compute_dim_x; + uint32_t compute_dim_y; + 
uint32_t compute_dim_z; + uint32_t compute_start_x; + uint32_t compute_start_y; + uint32_t compute_start_z; + uint32_t compute_num_thread_x; + uint32_t compute_num_thread_y; + uint32_t compute_num_thread_z; + uint32_t compute_pipelinestat_enable; + uint32_t compute_perfcount_enable; + uint32_t compute_pgm_lo; + uint32_t compute_pgm_hi; + uint32_t compute_tba_lo; + uint32_t compute_tba_hi; + uint32_t compute_tma_lo; + uint32_t compute_tma_hi; + uint32_t compute_pgm_rsrc1; + uint32_t compute_pgm_rsrc2; + uint32_t compute_vmid; + uint32_t compute_resource_limits; + uint32_t compute_static_thread_mgmt_se0; + uint32_t compute_static_thread_mgmt_se1; + uint32_t compute_tmpring_size; + uint32_t compute_static_thread_mgmt_se2; + uint32_t compute_static_thread_mgmt_se3; + uint32_t compute_restart_x; + uint32_t compute_restart_y; + uint32_t compute_restart_z; + uint32_t compute_thread_trace_enable; + uint32_t compute_misc_reserved; + uint32_t compute_dispatch_id; + uint32_t compute_threadgroup_id; + uint32_t compute_relaunch; + uint32_t compute_wave_restore_addr_lo; + uint32_t compute_wave_restore_addr_hi; + uint32_t compute_wave_restore_control; + uint32_t reserved_39; + uint32_t reserved_40; + uint32_t reserved_41; + uint32_t reserved_42; + uint32_t reserved_43; + uint32_t reserved_44; + uint32_t reserved_45; + uint32_t reserved_46; + uint32_t reserved_47; + uint32_t reserved_48; + uint32_t reserved_49; + uint32_t reserved_50; + uint32_t reserved_51; + uint32_t reserved_52; + uint32_t reserved_53; + uint32_t reserved_54; + uint32_t reserved_55; + uint32_t reserved_56; + uint32_t reserved_57; + uint32_t reserved_58; + uint32_t reserved_59; + uint32_t reserved_60; + uint32_t reserved_61; + uint32_t reserved_62; + uint32_t reserved_63; + uint32_t reserved_64; + uint32_t compute_user_data_0; + uint32_t compute_user_data_1; + uint32_t compute_user_data_2; + uint32_t compute_user_data_3; + uint32_t compute_user_data_4; + uint32_t compute_user_data_5; + uint32_t compute_user_data_6; + uint32_t compute_user_data_7; + uint32_t compute_user_data_8; + uint32_t compute_user_data_9; + uint32_t compute_user_data_10; + uint32_t compute_user_data_11; + uint32_t compute_user_data_12; + uint32_t compute_user_data_13; + uint32_t compute_user_data_14; + uint32_t compute_user_data_15; + uint32_t cp_compute_csinvoc_count_lo; + uint32_t cp_compute_csinvoc_count_hi; + uint32_t reserved_83; + uint32_t reserved_84; + uint32_t reserved_85; + uint32_t cp_mqd_query_time_lo; + uint32_t cp_mqd_query_time_hi; + uint32_t cp_mqd_connect_start_time_lo; + uint32_t cp_mqd_connect_start_time_hi; + uint32_t cp_mqd_connect_end_time_lo; + uint32_t cp_mqd_connect_end_time_hi; + uint32_t cp_mqd_connect_end_wf_count; + uint32_t cp_mqd_connect_end_pq_rptr; + uint32_t cp_mqd_connect_end_pq_wptr; + uint32_t cp_mqd_connect_end_ib_rptr; + uint32_t reserved_96; + uint32_t reserved_97; + uint32_t cp_mqd_save_start_time_lo; + uint32_t cp_mqd_save_start_time_hi; + uint32_t cp_mqd_save_end_time_lo; + uint32_t cp_mqd_save_end_time_hi; + uint32_t cp_mqd_restore_start_time_lo; + uint32_t cp_mqd_restore_start_time_hi; + uint32_t cp_mqd_restore_end_time_lo; + uint32_t cp_mqd_restore_end_time_hi; + uint32_t reserved_106; + uint32_t reserved_107; + uint32_t gds_cs_ctxsw_cnt0; + uint32_t gds_cs_ctxsw_cnt1; + uint32_t gds_cs_ctxsw_cnt2; + uint32_t gds_cs_ctxsw_cnt3; + uint32_t reserved_112; + uint32_t reserved_113; + uint32_t cp_pq_exe_status_lo; + uint32_t cp_pq_exe_status_hi; + uint32_t cp_packet_id_lo; + uint32_t cp_packet_id_hi; + uint32_t 
cp_packet_exe_status_lo; + uint32_t cp_packet_exe_status_hi; + uint32_t gds_save_base_addr_lo; + uint32_t gds_save_base_addr_hi; + uint32_t gds_save_mask_lo; + uint32_t gds_save_mask_hi; + uint32_t ctx_save_base_addr_lo; + uint32_t ctx_save_base_addr_hi; + uint32_t reserved_126; + uint32_t reserved_127; + uint32_t cp_mqd_base_addr_lo; + uint32_t cp_mqd_base_addr_hi; + uint32_t cp_hqd_active; + uint32_t cp_hqd_vmid; + uint32_t cp_hqd_persistent_state; + uint32_t cp_hqd_pipe_priority; + uint32_t cp_hqd_queue_priority; + uint32_t cp_hqd_quantum; + uint32_t cp_hqd_pq_base_lo; + uint32_t cp_hqd_pq_base_hi; + uint32_t cp_hqd_pq_rptr; + uint32_t cp_hqd_pq_rptr_report_addr_lo; + uint32_t cp_hqd_pq_rptr_report_addr_hi; + uint32_t cp_hqd_pq_wptr_poll_addr_lo; + uint32_t cp_hqd_pq_wptr_poll_addr_hi; + uint32_t cp_hqd_pq_doorbell_control; + uint32_t cp_hqd_pq_wptr; + uint32_t cp_hqd_pq_control; + uint32_t cp_hqd_ib_base_addr_lo; + uint32_t cp_hqd_ib_base_addr_hi; + uint32_t cp_hqd_ib_rptr; + uint32_t cp_hqd_ib_control; + uint32_t cp_hqd_iq_timer; + uint32_t cp_hqd_iq_rptr; + uint32_t cp_hqd_dequeue_request; + uint32_t cp_hqd_dma_offload; + uint32_t cp_hqd_sema_cmd; + uint32_t cp_hqd_msg_type; + uint32_t cp_hqd_atomic0_preop_lo; + uint32_t cp_hqd_atomic0_preop_hi; + uint32_t cp_hqd_atomic1_preop_lo; + uint32_t cp_hqd_atomic1_preop_hi; + uint32_t cp_hqd_hq_status0; + uint32_t cp_hqd_hq_control0; + uint32_t cp_mqd_control; + uint32_t cp_hqd_hq_status1; + uint32_t cp_hqd_hq_control1; + uint32_t cp_hqd_eop_base_addr_lo; + uint32_t cp_hqd_eop_base_addr_hi; + uint32_t cp_hqd_eop_control; + uint32_t cp_hqd_eop_rptr; + uint32_t cp_hqd_eop_wptr; + uint32_t cp_hqd_eop_done_events; + uint32_t cp_hqd_ctx_save_base_addr_lo; + uint32_t cp_hqd_ctx_save_base_addr_hi; + uint32_t cp_hqd_ctx_save_control; + uint32_t cp_hqd_cntl_stack_offset; + uint32_t cp_hqd_cntl_stack_size; + uint32_t cp_hqd_wg_state_offset; + uint32_t cp_hqd_ctx_save_size; + uint32_t cp_hqd_gds_resource_state; + uint32_t cp_hqd_error; + uint32_t cp_hqd_eop_wptr_mem; + uint32_t cp_hqd_eop_dones; + uint32_t reserved_182; + uint32_t reserved_183; + uint32_t reserved_184; + uint32_t reserved_185; + uint32_t reserved_186; + uint32_t reserved_187; + uint32_t reserved_188; + uint32_t reserved_189; + uint32_t reserved_190; + uint32_t reserved_191; + uint32_t iqtimer_pkt_header; + uint32_t iqtimer_pkt_dw0; + uint32_t iqtimer_pkt_dw1; + uint32_t iqtimer_pkt_dw2; + uint32_t iqtimer_pkt_dw3; + uint32_t iqtimer_pkt_dw4; + uint32_t iqtimer_pkt_dw5; + uint32_t iqtimer_pkt_dw6; + uint32_t iqtimer_pkt_dw7; + uint32_t iqtimer_pkt_dw8; + uint32_t iqtimer_pkt_dw9; + uint32_t iqtimer_pkt_dw10; + uint32_t iqtimer_pkt_dw11; + uint32_t iqtimer_pkt_dw12; + uint32_t iqtimer_pkt_dw13; + uint32_t iqtimer_pkt_dw14; + uint32_t iqtimer_pkt_dw15; + uint32_t iqtimer_pkt_dw16; + uint32_t iqtimer_pkt_dw17; + uint32_t iqtimer_pkt_dw18; + uint32_t iqtimer_pkt_dw19; + uint32_t iqtimer_pkt_dw20; + uint32_t iqtimer_pkt_dw21; + uint32_t iqtimer_pkt_dw22; + uint32_t iqtimer_pkt_dw23; + uint32_t iqtimer_pkt_dw24; + uint32_t iqtimer_pkt_dw25; + uint32_t iqtimer_pkt_dw26; + uint32_t iqtimer_pkt_dw27; + uint32_t iqtimer_pkt_dw28; + uint32_t iqtimer_pkt_dw29; + uint32_t iqtimer_pkt_dw30; + uint32_t iqtimer_pkt_dw31; + uint32_t reserved_225; + uint32_t reserved_226; + uint32_t reserved_227; + uint32_t set_resources_header; + uint32_t set_resources_dw1; + uint32_t set_resources_dw2; + uint32_t set_resources_dw3; + uint32_t set_resources_dw4; + uint32_t set_resources_dw5; + uint32_t 
set_resources_dw6;
+	uint32_t set_resources_dw7;
+	uint32_t reserved_236;
+	uint32_t reserved_237;
+	uint32_t reserved_238;
+	uint32_t reserved_239;
+	uint32_t queue_doorbell_id0;
+	uint32_t queue_doorbell_id1;
+	uint32_t queue_doorbell_id2;
+	uint32_t queue_doorbell_id3;
+	uint32_t queue_doorbell_id4;
+	uint32_t queue_doorbell_id5;
+	uint32_t queue_doorbell_id6;
+	uint32_t queue_doorbell_id7;
+	uint32_t queue_doorbell_id8;
+	uint32_t queue_doorbell_id9;
+	uint32_t queue_doorbell_id10;
+	uint32_t queue_doorbell_id11;
+	uint32_t queue_doorbell_id12;
+	uint32_t queue_doorbell_id13;
+	uint32_t queue_doorbell_id14;
+	uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000..144f50acc
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+	    TP_PROTO(struct amd_sched_job *sched_job),
+	    TP_ARGS(sched_job),
+	    TP_STRUCT__entry(
+			     __field(struct amd_sched_entity *, entity)
+			     __field(const char *, name)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = sched_job->s_entity;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = kfifo_len(
+				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->name, __entry->job_count,
+		      __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
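The job_count assignment in TP_fast_assign above relies on kfifo's byte-based accounting: kfifo_len() reports the fill level in bytes, and each element stored in the entity's job queue is one job pointer. A minimal sketch of the same arithmetic, shown outside the trace machinery (illustrative only):

    /* kfifo_len() returns bytes used; each queued element is a job
     * pointer, so dividing by the pointer size yields the number of
     * jobs waiting. E.g. 4 queued jobs on a 64-bit kernel give
     * kfifo_len() == 32 and 32 / 8 == 4. */
    u32 job_count = kfifo_len(&entity->job_queue) / sizeof(struct amd_sched_job *);

Note that sizeof(sched_job) in the tracepoint is the size of the pointer variable, not of struct amd_sched_job, which is why the division is correct.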
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 000000000..3697eeeec
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select the next job from the specified run queue with a round-robin
+ * policy. Return NULL if nothing is available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+	struct amd_sched_entity *entity;
+	struct amd_sched_job *sched_job;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return sched_job;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return sched_job;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
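The selector above resumes scanning just past rq->current_entity and gives up after one full lap, which is what makes the policy fair across entities. A self-contained toy model of the same walk, using an array index instead of a list_head cursor (illustrative only; rr_pick and has_work are invented names):

    #include <stddef.h>

    /* Round-robin pick over n slots: start after the last-served index,
     * wrap around, and stop after one full lap. Returns the chosen index,
     * or -1 when no slot has work, mirroring the NULL return above. */
    static int rr_pick(const int *has_work, size_t n, size_t *current)
    {
    	size_t i;

    	for (i = 1; i <= n; i++) {
    		size_t idx = (*current + i) % n;

    		if (has_work[idx]) {
    			*current = idx;
    			return (int)idx;
    		}
    	}
    	return -1;
    }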
+
+/**
+ * Init a context entity used by the scheduler when submitting to the HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid amd_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ *
+ * Return 0 on success, a negative error code on failure.
+*/
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs)
+{
+	int r;
+
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct amd_sched_entity));
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
+
+	spin_lock_init(&entity->queue_lock);
+	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+	if (r)
+		return r;
+
+	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = fence_context_alloc(1);
+
+	/* Add the entity to the run queue */
+	amd_sched_rq_add_entity(rq, entity);
+
+	return 0;
+}
+
+/**
+ * Query if entity is initialized
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity is initialized, false otherwise.
+*/
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+					    struct amd_sched_entity *entity)
+{
+	return entity->sched == sched &&
+		entity->rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity has no unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
+	rmb();
+	if (kfifo_is_empty(&entity->job_queue))
+		return true;
+
+	return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity)
+{
+	struct amd_sched_rq *rq = entity->rq;
+
+	if (!amd_sched_entity_is_initialized(sched, entity))
+		return;
+
+	/*
+	 * The client will not queue more IBs during this fini; consume
+	 * the existing queued IBs.
+	 */
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
+	kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+	amd_sched_wakeup(entity->sched);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct amd_sched_job *sched_job;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return NULL;
+
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
+		return NULL;
+
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {
+
+		if (fence_add_callback(entity->dependency, &entity->cb,
+				       amd_sched_entity_wakeup))
+			fence_put(entity->dependency);
+		else
+			return NULL;
+	}
+
+	return sched_job;
+}
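Taken together with amd_sched_entity_push_job() below, the entity API gives a driver a simple produce/consume contract: push blocks while the queue is full, and fini waits until the queue has drained. A hedged usage sketch; my_ring, my_job and the queue depth of 32 are invented example values, and lifetime and error handling are trimmed:

    struct amd_sched_entity entity;
    int r;

    /* One entity per context, feeding the ring's normal run queue. */
    r = amd_sched_entity_init(&my_ring->sched, &entity,
    			      &my_ring->sched.sched_rq, 32);
    if (r)
    	return r;

    my_job->s_entity = &entity;
    r = amd_sched_entity_push_job(my_job);	/* blocks while the queue is full */

    /* Waits for the entity to go idle before tearing it down. */
    amd_sched_entity_fini(&my_ring->sched, &entity);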
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			 sizeof(sched_job)) == sizeof(sched_job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		amd_sched_wakeup(sched_job->sched);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(
+		entity, sched_job->owner);
+
+	if (!fence)
+		return -ENOMEM;
+
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+
+	wait_event(entity->sched->job_scheduled,
+		   amd_sched_entity_in(sched_job));
+	trace_amd_sched_job(sched_job);
+	return 0;
+}
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+	if (amd_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run.
+*/
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *sched_job;
+
+	if (!amd_sched_ready(sched))
+		return NULL;
+
+	/* The kernel run queue has higher priority than the normal run queue */
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+
+	return sched_job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_fence *s_fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	struct amd_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	amd_sched_fence_signal(s_fence);
+	fence_put(&s_fence->base);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+	int r, count;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct amd_sched_entity *entity;
+		struct amd_sched_fence *s_fence;
+		struct amd_sched_job *sched_job;
+		struct fence *fence;
+
+		wait_event_interruptible(sched->wake_up_worker,
+			kthread_should_stop() ||
+			(sched_job = amd_sched_select_job(sched)));
+
+		if (!sched_job)
+			continue;
+
+		entity = sched_job->s_entity;
+		s_fence = sched_job->s_fence;
+		atomic_inc(&sched->hw_rq_count);
+		fence = sched->ops->run_job(sched_job);
+		if (fence) {
+			r = fence_add_callback(fence, &s_fence->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &s_fence->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		} else {
+			DRM_ERROR("Failed to run job!\n");
+			amd_sched_process_job(NULL, &s_fence->cb);
+		}
+
+		count = kfifo_out(&entity->job_queue, &sched_job,
+				  sizeof(sched_job));
+		WARN_ON(count != sizeof(sched_job));
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
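amd_sched_ready() above is the scheduler's only backpressure mechanism: the worker never hands more than hw_submission_limit jobs to the hardware, and each completion (amd_sched_process_job()) re-opens the gate and wakes the worker. A standalone toy model of that counting gate, using C11 atomics in place of the kernel's atomic_t (illustrative only; all names are invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct submit_gate {
    	atomic_int inflight;	/* models sched->hw_rq_count */
    	int limit;		/* models sched->hw_submission_limit */
    };

    static bool gate_ready(struct submit_gate *g)
    {
    	return atomic_load(&g->inflight) < g->limit;
    }

    static void gate_submit(struct submit_gate *g)
    {
    	atomic_fetch_add(&g->inflight, 1);	/* before run_job() */
    }

    static void gate_complete(struct submit_gate *g)
    {
    	atomic_fetch_sub(&g->inflight, 1);	/* completion fence fired */
    }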
+
+/**
+ * Init a gpu scheduler instance
+ *
+ * @sched		The pointer to the scheduler
+ * @ops			The backend operations for this scheduler.
+ * @hw_submission	The max number of in-flight hw submissions
+ * @name		Name used for debugging
+ *
+ * Return 0 on success, otherwise error code.
+*/
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   unsigned hw_submission, const char *name)
+{
+	sched->ops = ops;
+	sched->hw_submission_limit = hw_submission;
+	sched->name = name;
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	atomic_set(&sched->hw_rq_count, 0);
+
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return PTR_ERR(sched->thread);
+	}
+
+	return 0;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ */
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
+{
+	kthread_stop(sched->thread);
+}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000..80b64dc22
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware rings, based on the
+ * scheduling policy.
+*/
+struct amd_sched_entity {
+	struct list_head list;
+	struct amd_sched_rq *rq;
+	struct amd_gpu_scheduler *sched;
+
+	spinlock_t queue_lock;
+	struct kfifo job_queue;
+
+	atomic_t fence_seq;
+	uint64_t fence_context;
+
+	struct fence *dependency;
+	struct fence_cb cb;
+};
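A hedged sketch of how a driver wires these pieces together at init time, using only amd_sched_init(), amd_sched_fini() and the amd_sched_backend_ops declared later in this header; the my_* names and the limit of 16 are invented for illustration:

    static struct amd_sched_backend_ops my_ops = {
    	.dependency = my_dependency,	/* invented callback names */
    	.run_job    = my_run_job,
    };
    int r;

    /* One scheduler instance per hardware ring; at most 16 jobs are
     * kept in flight on the hardware at any time. */
    r = amd_sched_init(&my_ring->sched, &my_ops, 16, "gfx");
    if (r)
    	return r;

    /* ... submit work through entities, then tear down: */
    amd_sched_fini(&my_ring->sched);	/* stops the worker kthread */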
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+*/
+struct amd_sched_rq {
+	spinlock_t lock;
+	struct list_head entities;
+	struct amd_sched_entity *current_entity;
+};
+
+struct amd_sched_fence {
+	struct fence base;
+	struct fence_cb cb;
+	struct amd_gpu_scheduler *sched;
+	spinlock_t lock;
+	void *owner;
+};
+
+struct amd_sched_job {
+	struct amd_gpu_scheduler *sched;
+	struct amd_sched_entity *s_entity;
+	struct amd_sched_fence *s_fence;
+	void *owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+*/
+struct amd_sched_backend_ops {
+	struct fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring.
+*/
+struct amd_gpu_scheduler {
+	struct amd_sched_backend_ops *ops;
+	uint32_t hw_submission_limit;
+	const char *name;
+	struct amd_sched_rq sched_rq;
+	struct amd_sched_rq kernel_rq;
+	wait_queue_head_t wake_up_worker;
+	wait_queue_head_t job_scheduled;
+	atomic_t hw_rq_count;
+	struct task_struct *thread;
+};
+
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
+#endif
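to_amd_sched_fence() above uses a common kernel idiom: container_of() performs the downcast, and the ops pointer doubles as a runtime type tag, so a foreign fence yields NULL instead of a bogus pointer. A standalone model of the pattern (illustrative only; all names are invented):

    #include <stddef.h>

    struct base_ops { int tag; };
    struct base { const struct base_ops *ops; };
    struct derived { struct base base; int extra; };

    static const struct base_ops derived_ops;

    /* Downcast with a type check: only objects created with derived_ops
     * are accepted; anything else returns NULL. */
    static struct derived *to_derived(struct base *b)
    {
    	if (!b || b->ops != &derived_ops)
    		return NULL;
    	return (struct derived *)((char *)b - offsetof(struct derived, base));
    }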
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000..d80263809
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+	struct amd_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->owner = owner;
+	fence->sched = s_entity->sched;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&s_entity->fence_seq);
+	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+		   s_entity->fence_context, seq);
+
+	return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	int ret = fence_signal(&fence->base);
+
+	if (!ret)
+		FENCE_TRACE(&fence->base, "signaled from irq context\n");
+	else
+		FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return (const char *)fence->sched->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 7838e731b..7d03c51ab 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = framebuffer_alloc(0, dev->dev); - if (!info) { - ret = -ENOMEM; + info = drm_fb_helper_alloc_fbi(fbh); + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto err_fballoc; } - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto err_fbcmap; - } - strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id)); info->par = fbh; info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; @@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh, info->screen_size = obj->obj.size; info->screen_base = ptr; fbh->fb = &dfb->fb; - fbh->fbdev = info; + drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); @@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh, return 0; - err_fbcmap: - framebuffer_release(info); err_fballoc: dfb->fb.funcs->destroy(&dfb->fb); return ret; @@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev) return 0; err_fb_setup: + drm_fb_helper_release_fbi(fbh); drm_fb_helper_fini(fbh); err_fb_helper: priv->fbdev = NULL; @@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev) struct drm_fb_helper *fbh = priv->fbdev; if (fbh) { - 
struct fb_info *info = fbh->fbdev; - - if (info) { - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(fbh); + drm_fb_helper_release_fbi(fbh); drm_fb_helper_fini(fbh); diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index ff68eefae..f31db28a6 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct ast_fbdev *afbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct ast_fbdev *afbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); ast_dirty_update(afbdev, area->dx, area->dy, area->width, area->height); } @@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info, const struct fb_image *image) { struct ast_fbdev *afbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); ast_dirty_update(afbdev, image->dx, image->dy, image->width, image->height); } @@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct fb_info *info; int size, ret; - struct device *device = &dev->pdev->dev; void *sysram; struct drm_gem_object *gobj = NULL; struct ast_bo *bo = NULL; @@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper, if (!sysram) return -ENOMEM; - info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; - goto out; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_free_vram; } info->par = afbdev; ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj); if (ret) - goto out; + goto err_release_fbi; afbdev->sysram = sysram; afbdev->size = size; fb = &afbdev->afb.base; afbdev->helper.fb = fb; - afbdev->helper.fbdev = info; strcpy(info->fix.id, "astdrmfb"); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &astfb_ops; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - ret = -ENOMEM; - goto out; - } - - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out; - } info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); @@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper, fb->width, fb->height); return 0; -out: + +err_release_fbi: + drm_fb_helper_release_fbi(helper); +err_free_vram: + vfree(afbdev->sysram); return ret; } @@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = { static void ast_fbdev_destroy(struct drm_device *dev, struct ast_fbdev *afbdev) { - struct fb_info *info; struct ast_framebuffer *afb = &afbdev->afb; - if (afbdev->helper.fbdev) { - info = afbdev->helper.fbdev; - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + + drm_fb_helper_unregister_fbi(&afbdev->helper); + drm_fb_helper_release_fbi(&afbdev->helper); if (afb->obj) { drm_gem_object_unreference_unlocked(afb->obj); @@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state) if (!ast->fbdev) return; - fb_set_suspend(ast->fbdev->helper.fbdev, state); + 
drm_fb_helper_set_suspend(&ast->fbdev->helper, state); } diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 035dacc93..838217f8c 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct ast_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_ast_bo(obj); *offset = ast_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + drm_gem_object_unreference_unlocked(obj); + + return 0; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 5ae5c6923..9f6e234e7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -239,7 +239,8 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, return atmel_hlcdc_plane_prepare_disc_area(s); } -static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c) +static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, + struct drm_crtc_state *old_s) { struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -253,7 +254,8 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c) } } -static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc) +static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_s) { /* TODO: write common plane control register if available */ } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index ef6182bc8..8bc62ec40 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -29,6 +29,115 @@ #define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8 +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 5, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = { + .min_width = 0, + .min_height = 0, + .max_width = 1280, + .max_height = 860, + .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers), + .layers = atmel_hlcdc_at91sam9n12_layers, +}; + +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 5, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + .disc_pos = 5, + .disc_size = 6, + }, + }, + { + .name = "overlay1", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x100, + .id = 1, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "high-end-overlay", + .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, + .regs_offset = 0x280, + .id = 2, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 17, + .layout = { + .pos = 2, + .size = 3, + .memsize = 4, + .xstride = { 5, 7 }, + 
.pstride = { 6, 8 }, + .default_color = 9, + .chroma_key = 10, + .chroma_key_mask = 11, + .general_config = 12, + .csc = 14, + }, + }, + { + .name = "cursor", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x340, + .id = 3, + .type = ATMEL_HLCDC_CURSOR_LAYER, + .nconfigs = 10, + .max_width = 128, + .max_height = 128, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = { + .min_width = 0, + .min_height = 0, + .max_width = 800, + .max_height = 600, + .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers), + .layers = atmel_hlcdc_at91sam9x5_layers, +}; + static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { { .name = "base", @@ -132,11 +241,105 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = { .layers = atmel_hlcdc_sama5d3_layers, }; +static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { + { + .name = "base", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x40, + .id = 0, + .type = ATMEL_HLCDC_BASE_LAYER, + .nconfigs = 7, + .layout = { + .xstride = { 2 }, + .default_color = 3, + .general_config = 4, + .disc_pos = 5, + .disc_size = 6, + }, + }, + { + .name = "overlay1", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x140, + .id = 1, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "overlay2", + .formats = &atmel_hlcdc_plane_rgb_formats, + .regs_offset = 0x240, + .id = 2, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 10, + .layout = { + .pos = 2, + .size = 3, + .xstride = { 4 }, + .pstride = { 5 }, + .default_color = 6, + .chroma_key = 7, + .chroma_key_mask = 8, + .general_config = 9, + }, + }, + { + .name = "high-end-overlay", + .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, + .regs_offset = 0x340, + .id = 3, + .type = ATMEL_HLCDC_OVERLAY_LAYER, + .nconfigs = 42, + .layout = { + .pos = 2, + .size = 3, + .memsize = 4, + .xstride = { 5, 7 }, + .pstride = { 6, 8 }, + .default_color = 9, + .chroma_key = 10, + .chroma_key_mask = 11, + .general_config = 12, + .csc = 14, + }, + }, +}; + +static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = { + .min_width = 0, + .min_height = 0, + .max_width = 2048, + .max_height = 2048, + .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers), + .layers = atmel_hlcdc_sama5d4_layers, +}; static const struct of_device_id atmel_hlcdc_of_match[] = { + { + .compatible = "atmel,at91sam9n12-hlcdc", + .data = &atmel_hlcdc_dc_at91sam9n12, + }, + { + .compatible = "atmel,at91sam9x5-hlcdc", + .data = &atmel_hlcdc_dc_at91sam9x5, + }, { .compatible = "atmel,sama5d3-hlcdc", .data = &atmel_hlcdc_dc_sama5d3, }, + { + .compatible = "atmel,sama5d4-hlcdc", + .data = &atmel_hlcdc_dc_sama5d4, + }, { /* sentinel */ }, }; @@ -485,7 +688,9 @@ static const struct file_operations fops = { }; static struct drm_driver atmel_hlcdc_dc_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET, + .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | + DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, .preclose = atmel_hlcdc_dc_preclose, .lastclose = atmel_hlcdc_dc_lastclose, .irq_handler = atmel_hlcdc_dc_irq_handler, @@ -497,6 +702,15 @@ static struct drm_driver atmel_hlcdc_dc_driver = { 
.disable_vblank = atmel_hlcdc_dc_disable_vblank, .gem_free_object = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 9c4513005..067e4c144 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -126,12 +126,16 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder, if (info->num_bus_formats) { switch (info->bus_formats[0]) { + case MEDIA_BUS_FMT_RGB565_1X16: + cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8; + break; case MEDIA_BUS_FMT_RGB666_1X18: cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8; break; case MEDIA_BUS_FMT_RGB888_1X24: cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8; break; + case MEDIA_BUS_FMT_RGB444_1X12: default: break; } diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 98837bde2..7f1a3604b 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev) if (bochs->fb.initialized) { console_lock(); - fb_set_suspend(bochs->fb.helper.fbdev, 1); + drm_fb_helper_set_suspend(&bochs->fb.helper, 1); console_unlock(); } @@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev) if (bochs->fb.initialized) { console_lock(); - fb_set_suspend(bochs->fb.helper.fbdev, 0); + drm_fb_helper_set_suspend(&bochs->fb.helper, 0); console_unlock(); } diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index 976d9798d..09a0637aa 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -56,11 +56,9 @@ static int bochsfb_create(struct drm_fb_helper *helper, { struct bochs_device *bochs = container_of(helper, struct bochs_device, fb.helper); - struct drm_device *dev = bochs->dev; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; - struct device *device = &dev->pdev->dev; struct drm_gem_object *gobj = NULL; struct bochs_bo *bo = NULL; int size, ret; @@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper, ttm_bo_unreserve(&bo->bo); /* init fb device */ - info = framebuffer_alloc(0, device); - if (info == NULL) - return -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); info->par = &bochs->fb.helper; ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, 
&mode_cmd, gobj); - if (ret) + if (ret) { + drm_fb_helper_release_fbi(helper); return ret; + } bochs->fb.size = size; /* setup helper */ fb = &bochs->fb.gfb.base; bochs->fb.helper.fb = fb; - bochs->fb.helper.fbdev = info; strcpy(info->fix.id, "bochsdrmfb"); @@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper, info->fix.smem_start = 0; info->fix.smem_len = size; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); - return -ENOMEM; - } - return 0; } static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; - struct fb_info *info; DRM_DEBUG_DRIVER("\n"); - if (bochs->fb.helper.fbdev) { - info = bochs->fb.helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&bochs->fb.helper); + drm_fb_helper_release_fbi(&bochs->fb.helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 66286ff51..f69e6bf9b 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct bochs_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_bochs_bo(obj); *offset = bochs_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; - + drm_gem_object_unreference_unlocked(obj); + return 0; } /* ---------------------------------------------------------------------- */ diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index acef32237..2de52a53a 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -1,24 +1,32 @@ +config DRM_BRIDGE + def_bool y + depends on DRM + help + Bridge registration and lookup framework. + +menu "Display Interface Bridges" + depends on DRM && DRM_BRIDGE + config DRM_DW_HDMI tristate - depends on DRM select DRM_KMS_HELPER -config DRM_PTN3460 - tristate "PTN3460 DP/LVDS bridge" - depends on DRM +config DRM_NXP_PTN3460 + tristate "NXP PTN3460 DP/LVDS bridge" depends on OF select DRM_KMS_HELPER select DRM_PANEL ---help--- - ptn3460 eDP-LVDS bridge chip driver. + NXP PTN3460 eDP-LVDS bridge chip driver. -config DRM_PS8622 +config DRM_PARADE_PS8622 tristate "Parade eDP/LVDS bridge" - depends on DRM depends on OF select DRM_PANEL select DRM_KMS_HELPER select BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- - parade eDP-LVDS bridge chip driver. + Parade eDP-LVDS bridge chip driver. 
+ +endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 8dfebd984..e2eef1c2f 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,5 +1,5 @@ ccflags-y := -Iinclude/drm -obj-$(CONFIG_DRM_PS8622) += ps8622.o -obj-$(CONFIG_DRM_PTN3460) += ptn3460.o obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o +obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o +obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c index 816d104ca..0083d4e7e 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi.c +++ b/drivers/gpu/drm/bridge/dw_hdmi.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -81,10 +82,6 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = { }; struct hdmi_vmode { - bool mdvi; - bool mhsyncpolarity; - bool mvsyncpolarity; - bool minterlaced; bool mdataenablepolarity; unsigned int mpixelclock; @@ -123,12 +120,20 @@ struct dw_hdmi { bool phy_enabled; struct drm_display_mode previous_mode; - struct regmap *regmap; struct i2c_adapter *ddc; void __iomem *regs; + bool sink_is_hdmi; + bool sink_has_audio; + struct mutex mutex; /* for state below and previous_mode */ + bool disabled; /* DRM has disabled our bridge */ + + spinlock_t audio_lock; struct mutex audio_mutex; unsigned int sample_rate; + unsigned int audio_cts; + unsigned int audio_n; + bool audio_enable; int ratio; void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); @@ -335,42 +340,76 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk, } static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, - unsigned long pixel_clk) + unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio) { - unsigned int clk_n, clk_cts; - - clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk, - hdmi->ratio); - clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk, - hdmi->ratio); + unsigned int n, cts; - if (!clk_cts) { - dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n", - __func__, pixel_clk); - return; + n = hdmi_compute_n(sample_rate, pixel_clk, ratio); + cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio); + if (!cts) { + dev_err(hdmi->dev, + "%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n", + __func__, pixel_clk, sample_rate); } - dev_dbg(hdmi->dev, "%s: samplerate=%d ratio=%d pixelclk=%lu N=%d cts=%d\n", - __func__, hdmi->sample_rate, hdmi->ratio, - pixel_clk, clk_n, clk_cts); + dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n", + __func__, sample_rate, ratio, pixel_clk, n, cts); - hdmi_set_cts_n(hdmi, clk_cts, clk_n); + spin_lock_irq(&hdmi->audio_lock); + hdmi->audio_n = n; + hdmi->audio_cts = cts; + hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? 
n : 0); + spin_unlock_irq(&hdmi->audio_lock); } static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->audio_mutex); - hdmi_set_clk_regenerator(hdmi, 74250000); + hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate, + hdmi->ratio); mutex_unlock(&hdmi->audio_mutex); } static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->audio_mutex); - hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock); + hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock, + hdmi->sample_rate, hdmi->ratio); mutex_unlock(&hdmi->audio_mutex); } +void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) +{ + mutex_lock(&hdmi->audio_mutex); + hdmi->sample_rate = rate; + hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock, + hdmi->sample_rate, hdmi->ratio); + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate); + +void dw_hdmi_audio_enable(struct dw_hdmi *hdmi) +{ + unsigned long flags; + + spin_lock_irqsave(&hdmi->audio_lock, flags); + hdmi->audio_enable = true; + hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); + spin_unlock_irqrestore(&hdmi->audio_lock, flags); +} +EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable); + +void dw_hdmi_audio_disable(struct dw_hdmi *hdmi) +{ + unsigned long flags; + + spin_lock_irqsave(&hdmi->audio_lock, flags); + hdmi->audio_enable = false; + hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0); + spin_unlock_irqrestore(&hdmi->audio_lock, flags); +} +EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable); + /* * this submodule is responsible for the video data synchronization. * for example, for RGB 4:4:4 input, the data map is defined as @@ -701,9 +740,9 @@ static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, return 0; } -static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable) +static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable) { - hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, + hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_PDZ_OFFSET, HDMI_PHY_CONF0_PDZ_MASK); } @@ -753,12 +792,12 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable) static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, unsigned char res, int cscon) { - unsigned res_idx, i; + unsigned res_idx; u8 val, msec; - const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data; - const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg; - const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr; - const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config; + const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; + const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg; + const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr; + const struct dw_hdmi_phy_config *phy_config = pdata->phy_config; if (prep) return -EINVAL; @@ -778,6 +817,30 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, return -EINVAL; } + /* PLL/MPLL Cfg - always match on final entry */ + for (; mpll_config->mpixelclock != ~0UL; mpll_config++) + if (hdmi->hdmi_data.video_mode.mpixelclock <= + mpll_config->mpixelclock) + break; + + for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++) + if (hdmi->hdmi_data.video_mode.mpixelclock <= + curr_ctrl->mpixelclock) + break; + + for (; phy_config->mpixelclock != ~0UL; phy_config++) + if (hdmi->hdmi_data.video_mode.mpixelclock <= + phy_config->mpixelclock) + break; + + if (mpll_config->mpixelclock == ~0UL || + 
curr_ctrl->mpixelclock == ~0UL || + phy_config->mpixelclock == ~0UL) { + dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n", + hdmi->hdmi_data.video_mode.mpixelclock); + return -EINVAL; + } + /* Enable csc path */ if (cscon) val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH; @@ -803,48 +866,23 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, HDMI_PHY_I2CM_SLAVE_ADDR); hdmi_phy_test_clear(hdmi, 0); - /* PLL/MPLL Cfg - always match on final entry */ - for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++) - if (hdmi->hdmi_data.video_mode.mpixelclock <= - mpll_config[i].mpixelclock) - break; - - hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06); - hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15); - - for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++) - if (hdmi->hdmi_data.video_mode.mpixelclock <= - curr_ctrl[i].mpixelclock) - break; - - if (curr_ctrl[i].mpixelclock == (~0UL)) { - dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n", - hdmi->hdmi_data.video_mode.mpixelclock); - return -EINVAL; - } + hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06); + hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15); /* CURRCTRL */ - hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10); + hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10); hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17); - for (i = 0; phy_config[i].mpixelclock != (~0UL); i++) - if (hdmi->hdmi_data.video_mode.mpixelclock <= - phy_config[i].mpixelclock) - break; - - /* RESISTANCE TERM 133Ohm Cfg */ - hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19); /* TXTERM */ - /* PREEMP Cgf 0.00 */ - hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */ - /* TX/CK LVL 10 */ - hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */ + hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19); /* TXTERM */ + hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */ + hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */ /* REMOVE CLK TERM */ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */ - dw_hdmi_phy_enable_power(hdmi, 1); + dw_hdmi_phy_enable_powerdown(hdmi, false); /* toggle TMDS enable */ dw_hdmi_phy_enable_tmds(hdmi, 0); @@ -879,18 +917,17 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, static int dw_hdmi_phy_init(struct dw_hdmi *hdmi) { int i, ret; - bool cscon = false; + bool cscon; /*check csc whether needed activated in HDMI mode */ - cscon = (is_color_space_conversion(hdmi) && - !hdmi->hdmi_data.video_mode.mdvi); + cscon = hdmi->sink_is_hdmi && is_color_space_conversion(hdmi); /* HDMI Phy spec says to do the phy initialization sequence twice */ for (i = 0; i < 2; i++) { dw_hdmi_phy_sel_data_en_pol(hdmi, 1); dw_hdmi_phy_sel_interface_control(hdmi, 0); dw_hdmi_phy_enable_tmds(hdmi, 0); - dw_hdmi_phy_enable_power(hdmi, 0); + dw_hdmi_phy_enable_powerdown(hdmi, true); /* Enable CSC */ ret = hdmi_phy_configure(hdmi, 0, 8, cscon); @@ -921,74 +958,76 @@ static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi) HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1); } -static void hdmi_config_AVI(struct dw_hdmi *hdmi) +static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) { - u8 val, pix_fmt, under_scan; - u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry; - bool aspect_16_9; + struct hdmi_avi_infoframe frame; + u8 val; - aspect_16_9 = false; /* FIXME */ + /* Initialise info 
frame from DRM mode */ + drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); - /* AVI Data Byte 1 */ if (hdmi->hdmi_data.enc_out_format == YCBCR444) - pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444; + frame.colorspace = HDMI_COLORSPACE_YUV444; else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) - pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422; + frame.colorspace = HDMI_COLORSPACE_YUV422; else - pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB; - - under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA; - - /* - * Active format identification data is present in the AVI InfoFrame. - * Under scan info, no bar data - */ - val = pix_fmt | under_scan | - HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT | - HDMI_FC_AVICONF0_BAR_DATA_NO_DATA; - - hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0); - - /* AVI Data Byte 2 -Set the Aspect Ratio */ - if (aspect_16_9) { - act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9; - coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9; - } else { - act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3; - coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3; - } + frame.colorspace = HDMI_COLORSPACE_RGB; /* Set up colorimetry */ if (hdmi->hdmi_data.enc_out_format == XVYCC444) { - colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) - ext_colorimetry = - HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ - ext_colorimetry = - HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; } else if (hdmi->hdmi_data.enc_out_format != RGB) { - if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) - colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; - else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ - colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; - ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + frame.colorimetry = hdmi->hdmi_data.colorimetry; + frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } else { /* Carries no data */ - colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA; - ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; + frame.colorimetry = HDMI_COLORIMETRY_NONE; + frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } - val = colorimetry | coded_ratio | act_ratio; + frame.scan_mode = HDMI_SCAN_MODE_NONE; + + /* + * The Designware IP uses a different byte format from standard + * AVI info frames, though generally the bits are in the correct + * bytes. + */ + + /* + * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6, + * active aspect present in bit 6 rather than 4. 
+ */ + val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3); + if (frame.active_aspect & 15) + val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT; + if (frame.top_bar || frame.bottom_bar) + val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR; + if (frame.left_bar || frame.right_bar) + val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR; + hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0); + + /* AVI data byte 2 differences: none */ + val = ((frame.colorimetry & 0x3) << 6) | + ((frame.picture_aspect & 0x3) << 4) | + (frame.active_aspect & 0xf); hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1); - /* AVI Data Byte 3 */ - val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry | - HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT | - HDMI_FC_AVICONF2_SCALING_NONE; + /* AVI data byte 3 differences: none */ + val = ((frame.extended_colorimetry & 0x7) << 4) | + ((frame.quantization_range & 0x3) << 2) | + (frame.nups & 0x3); + if (frame.itc) + val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID; hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2); - /* AVI Data Byte 4 */ - hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID); + /* AVI data byte 4 differences: none */ + val = frame.video_code & 0x7f; + hdmi_writeb(hdmi, val, HDMI_FC_AVIVID); /* AVI Data Byte 5- set up input and output pixel repetition */ val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) << @@ -999,20 +1038,23 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi) HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK); hdmi_writeb(hdmi, val, HDMI_FC_PRCONF); - /* IT Content and quantization range = don't care */ - val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS | - HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED; + /* + * AVI data byte 5 differences: content type in 0,1 rather than 4,5, + * ycc range in bits 2,3 rather than 6,7 + */ + val = ((frame.ycc_quantization_range & 0x3) << 2) | + (frame.content_type & 0x3); hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3); /* AVI Data Bytes 6-13 */ - hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0); - hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1); - hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0); - hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1); - hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0); - hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1); - hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0); - hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1); + hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0); + hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1); + hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0); + hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1); + hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0); + hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1); + hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0); + hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1); } static void hdmi_av_composer(struct dw_hdmi *hdmi, @@ -1022,9 +1064,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len; - vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC); - vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC); - vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); vmode->mpixelclock = mode->clock * 1000; dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock); @@ -1034,13 +1073,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); - inv_val |= (vmode->mvsyncpolarity ? + inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ? 
HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH : - HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW); + HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW; - inv_val |= (vmode->mhsyncpolarity ? + inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ? HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH : - HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW); + HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW; inv_val |= (vmode->mdataenablepolarity ? HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH : @@ -1049,17 +1088,17 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, if (hdmi->vic == 39) inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH; else - inv_val |= (vmode->minterlaced ? + inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ? HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH : - HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW); + HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW; - inv_val |= (vmode->minterlaced ? + inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ? HDMI_FC_INVIDCONF_IN_I_P_INTERLACED : - HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE); + HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE; - inv_val |= (vmode->mdvi ? - HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE : - HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE); + inv_val |= hdmi->sink_is_hdmi ? + HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE : + HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE; hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF); @@ -1105,7 +1144,7 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi) return; dw_hdmi_phy_enable_tmds(hdmi, 0); - dw_hdmi_phy_enable_power(hdmi, 0); + dw_hdmi_phy_enable_powerdown(hdmi, true); hdmi->phy_enabled = false; } @@ -1186,10 +1225,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) if (!hdmi->vic) { dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n"); - hdmi->hdmi_data.video_mode.mdvi = true; } else { dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic); - hdmi->hdmi_data.video_mode.mdvi = false; } if ((hdmi->vic == 6) || (hdmi->vic == 7) || @@ -1200,18 +1237,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) else hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; - if ((hdmi->vic == 10) || (hdmi->vic == 11) || - (hdmi->vic == 12) || (hdmi->vic == 13) || - (hdmi->vic == 14) || (hdmi->vic == 15) || - (hdmi->vic == 25) || (hdmi->vic == 26) || - (hdmi->vic == 27) || (hdmi->vic == 28) || - (hdmi->vic == 29) || (hdmi->vic == 30) || - (hdmi->vic == 35) || (hdmi->vic == 36) || - (hdmi->vic == 37) || (hdmi->vic == 38)) - hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1; - else - hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; - + hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; /* TODO: Get input format from IPU (via FB driver interface) */ @@ -1235,18 +1261,22 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step B.3 */ dw_hdmi_enable_video_path(hdmi); - /* not for DVI mode */ - if (hdmi->hdmi_data.video_mode.mdvi) { - dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); - } else { - dev_dbg(hdmi->dev, "%s CEA mode\n", __func__); + if (hdmi->sink_has_audio) { + dev_dbg(hdmi->dev, "sink has audio support\n"); /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); hdmi_enable_audio_clk(hdmi); + } + + /* not for DVI mode */ + if (hdmi->sink_is_hdmi) { + dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__); /* HDMI Initialization Step F - Configure AVI InfoFrame */ - hdmi_config_AVI(hdmi); + hdmi_config_AVI(hdmi, mode); + } else { + 
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); } hdmi_video_packetize(hdmi); @@ -1255,7 +1285,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) hdmi_tx_hdcp_config(hdmi); dw_hdmi_clear_overflow(hdmi); - if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi) + if (hdmi->cable_plugin && hdmi->sink_is_hdmi) hdmi_enable_overflow_interrupts(hdmi); return 0; @@ -1348,10 +1378,12 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, { struct dw_hdmi *hdmi = bridge->driver_private; - dw_hdmi_setup(hdmi, mode); + mutex_lock(&hdmi->mutex); /* Store the display mode for plugin/DKMS poweron events */ memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode)); + + mutex_unlock(&hdmi->mutex); } static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, @@ -1365,14 +1397,20 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge) { struct dw_hdmi *hdmi = bridge->driver_private; + mutex_lock(&hdmi->mutex); + hdmi->disabled = true; dw_hdmi_poweroff(hdmi); + mutex_unlock(&hdmi->mutex); } static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) { struct dw_hdmi *hdmi = bridge->driver_private; + mutex_lock(&hdmi->mutex); dw_hdmi_poweron(hdmi); + hdmi->disabled = false; + mutex_unlock(&hdmi->mutex); } static void dw_hdmi_bridge_nop(struct drm_bridge *bridge) @@ -1405,6 +1443,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", edid->width_cm, edid->height_cm); + hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); + hdmi->sink_has_audio = drm_detect_monitor_audio(edid); drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1423,6 +1463,10 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector, struct dw_hdmi, connector); enum drm_mode_status mode_status = MODE_OK; + /* We don't support double-clocked modes */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_BAD; + if (hdmi->plat_data->mode_valid) mode_status = hdmi->plat_data->mode_valid(connector, mode); @@ -1489,21 +1533,21 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { + hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0); + mutex_lock(&hdmi->mutex); if (phy_int_pol & HDMI_PHY_HPD) { dev_dbg(hdmi->dev, "EVENT=plugin\n"); - hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0); - - dw_hdmi_poweron(hdmi); + if (!hdmi->disabled) + dw_hdmi_poweron(hdmi); } else { dev_dbg(hdmi->dev, "EVENT=plugout\n"); - hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD, - HDMI_PHY_POL0); - - dw_hdmi_poweroff(hdmi); + if (!hdmi->disabled) + dw_hdmi_poweroff(hdmi); } - drm_helper_hpd_irq_event(hdmi->connector.dev); + mutex_unlock(&hdmi->mutex); + drm_helper_hpd_irq_event(hdmi->bridge->dev); } hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); @@ -1570,8 +1614,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master, hdmi->sample_rate = 48000; hdmi->ratio = 100; hdmi->encoder = encoder; + hdmi->disabled = true; + mutex_init(&hdmi->mutex); mutex_init(&hdmi->audio_mutex); + spin_lock_init(&hdmi->audio_lock); of_property_read_u32(np, "reg-io-width", &val); diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h index 175dbc89a..ee7f7ed2a 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi.h +++ b/drivers/gpu/drm/bridge/dw_hdmi.h @@ -7,8 +7,8 @@ * (at your option) any later version. 
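A note on the dw_hdmi_irq() hunk a little further up: the PHY's hot-plug sense is level-triggered, so the handler re-arms it by writing back the inverted polarity in one step (hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0)), and the polarity bit itself then doubles as the current cable state. A condensed, self-contained model of that pattern (hosted C; names and the register stand-ins are ours, for illustration only):

#include <stdbool.h>
#include <stdint.h>

#define HPD_BIT 0x02			/* stands in for HDMI_PHY_HPD */

static uint8_t pol0 = HPD_BIT;		/* models HDMI_PHY_POL0: armed for plugin */

/* Called when the HPD interrupt fires: flip the sense so the next
 * level change is caught, and report what this event meant. */
static bool hpd_event_was_plugin(void)
{
	bool plugin = pol0 & HPD_BIT;	/* armed-for-plugin => this was a plugin */

	pol0 ^= HPD_BIT;		/* what hdmi_modb(~pol, HPD, POL0) achieves */
	return plugin;
}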
*/ -#ifndef __IMX_HDMI_H__ -#define __IMX_HDMI_H__ +#ifndef __DW_HDMI_H__ +#define __DW_HDMI_H__ /* Identification Registers */ #define HDMI_DESIGN_ID 0x0000 @@ -525,7 +525,7 @@ /* I2C Master Registers (E-DDC) */ #define HDMI_I2CM_SLAVE 0x7E00 -#define HDMI_I2CMESS 0x7E01 +#define HDMI_I2CM_ADDRESS 0x7E01 #define HDMI_I2CM_DATAO 0x7E02 #define HDMI_I2CM_DATAI 0x7E03 #define HDMI_I2CM_OPERATION 0x7E04 @@ -1031,4 +1031,4 @@ enum { HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0, }; -#endif /* __IMX_HDMI_H__ */ +#endif /* __DW_HDMI_H__ */ diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c new file mode 100644 index 000000000..1b1bf2384 --- /dev/null +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -0,0 +1,411 @@ +/* + * NXP PTN3460 DP/LVDS bridge driver + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_graph.h> + +#include <drm/drm_panel.h> + +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_atomic_helper.h" +#include "drm_edid.h" +#include "drmP.h" + +#define PTN3460_EDID_ADDR 0x0 +#define PTN3460_EDID_EMULATION_ADDR 0x84 +#define PTN3460_EDID_ENABLE_EMULATION 0 +#define PTN3460_EDID_EMULATION_SELECTION 1 +#define PTN3460_EDID_SRAM_LOAD_ADDR 0x85 + +struct ptn3460_bridge { + struct drm_connector connector; + struct i2c_client *client; + struct drm_bridge bridge; + struct edid *edid; + struct drm_panel *panel; + struct gpio_desc *gpio_pd_n; + struct gpio_desc *gpio_rst_n; + u32 edid_emulation; + bool enabled; +}; + +static inline struct ptn3460_bridge * + bridge_to_ptn3460(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ptn3460_bridge, bridge); +} + +static inline struct ptn3460_bridge * + connector_to_ptn3460(struct drm_connector *connector) +{ + return container_of(connector, struct ptn3460_bridge, connector); +} + +static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, + u8 *buf, int len) +{ + int ret; + + ret = i2c_master_send(ptn_bridge->client, &addr, 1); + if (ret <= 0) { + DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); + return ret; + } + + ret = i2c_master_recv(ptn_bridge->client, buf, len); + if (ret <= 0) { + DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret); + return ret; + } + + return 0; +} + +static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr, + char val) +{ + int ret; + char buf[2]; + + buf[0] = addr; + buf[1] = val; + + ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf)); + if (ret <= 0) { + DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); + return ret; + } + + return 0; +} + +static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) +{ + int ret; + char val; + + /* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */ + ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR, + ptn_bridge->edid_emulation); + if (ret) { + DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret); + return ret; + } + + /* Enable EDID emulation and select the desired EDID */ + val = 1 << PTN3460_EDID_ENABLE_EMULATION
| + ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION; + + ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val); + if (ret) { + DRM_ERROR("Failed to write EDID value, ret=%d\n", ret); + return ret; + } + + return 0; +} + +static void ptn3460_pre_enable(struct drm_bridge *bridge) +{ + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + int ret; + + if (ptn_bridge->enabled) + return; + + gpiod_set_value(ptn_bridge->gpio_pd_n, 1); + + gpiod_set_value(ptn_bridge->gpio_rst_n, 0); + usleep_range(10, 20); + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); + + if (drm_panel_prepare(ptn_bridge->panel)) { + DRM_ERROR("failed to prepare panel\n"); + return; + } + + /* + * There's a bug in the PTN chip where it falsely asserts hotplug before + * it is fully functional. We're forced to wait for the maximum start up + * time specified in the chip's datasheet to make sure we're really up. + */ + msleep(90); + + ret = ptn3460_select_edid(ptn_bridge); + if (ret) + DRM_ERROR("Select EDID failed ret=%d\n", ret); + + ptn_bridge->enabled = true; +} + +static void ptn3460_enable(struct drm_bridge *bridge) +{ + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + + if (drm_panel_enable(ptn_bridge->panel)) { + DRM_ERROR("failed to enable panel\n"); + return; + } +} + +static void ptn3460_disable(struct drm_bridge *bridge) +{ + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + + if (!ptn_bridge->enabled) + return; + + ptn_bridge->enabled = false; + + if (drm_panel_disable(ptn_bridge->panel)) { + DRM_ERROR("failed to disable panel\n"); + return; + } + + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); + gpiod_set_value(ptn_bridge->gpio_pd_n, 0); +} + +static void ptn3460_post_disable(struct drm_bridge *bridge) +{ + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + + if (drm_panel_unprepare(ptn_bridge->panel)) { + DRM_ERROR("failed to unprepare panel\n"); + return; + } +} + +static int ptn3460_get_modes(struct drm_connector *connector) +{ + struct ptn3460_bridge *ptn_bridge; + u8 *edid; + int ret, num_modes = 0; + bool power_off; + + ptn_bridge = connector_to_ptn3460(connector); + + if (ptn_bridge->edid) + return drm_add_edid_modes(connector, ptn_bridge->edid); + + power_off = !ptn_bridge->enabled; + ptn3460_pre_enable(&ptn_bridge->bridge); + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) { + DRM_ERROR("Failed to allocate EDID\n"); + return 0; + } + + ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid, + EDID_LENGTH); + if (ret) { + kfree(edid); + goto out; + } + + ptn_bridge->edid = (struct edid *)edid; + drm_mode_connector_update_edid_property(connector, ptn_bridge->edid); + + num_modes = drm_add_edid_modes(connector, ptn_bridge->edid); + +out: + if (power_off) + ptn3460_disable(&ptn_bridge->bridge); + + return num_modes; +} + +static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) +{ + struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); + + return ptn_bridge->bridge.encoder; +} + +static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { + .get_modes = ptn3460_get_modes, + .best_encoder = ptn3460_best_encoder, +}; + +static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, + bool force) +{ + return connector_status_connected; +} + +static void ptn3460_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); +} + +static struct drm_connector_funcs ptn3460_connector_funcs = { + .dpms = 
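The ptn3460_select_edid() helper above packs two fields into the emulation control register at 0x84: bit 0 (PTN3460_EDID_ENABLE_EMULATION) switches emulation on, and the selected EDID index sits in the bits above it (shifted by PTN3460_EDID_EMULATION_SELECTION). A worked encoding in hosted C, for illustration; the slot value 4 is an arbitrary example:

#include <assert.h>
#include <stdint.h>

#define PTN3460_EDID_ENABLE_EMULATION		0
#define PTN3460_EDID_EMULATION_SELECTION	1

static uint8_t ptn3460_emulation_ctl(uint8_t slot)
{
	/* enable bit plus the emulated-EDID index above it */
	return (uint8_t)(1 << PTN3460_EDID_ENABLE_EMULATION |
			 slot << PTN3460_EDID_EMULATION_SELECTION);
}

int main(void)
{
	/* e.g. a DT with edid-emulation = <4> yields 0x09 */
	assert(ptn3460_emulation_ctl(4) == 0x09);
	return 0;
}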
drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = ptn3460_detect, + .destroy = ptn3460_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ptn3460_bridge_attach(struct drm_bridge *bridge) +{ + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(bridge->dev, &ptn_bridge->connector, + &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + drm_connector_helper_add(&ptn_bridge->connector, + &ptn3460_connector_helper_funcs); + drm_connector_register(&ptn_bridge->connector); + drm_mode_connector_attach_encoder(&ptn_bridge->connector, + bridge->encoder); + + if (ptn_bridge->panel) + drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector); + + drm_helper_hpd_irq_event(ptn_bridge->connector.dev); + + return ret; +} + +static struct drm_bridge_funcs ptn3460_bridge_funcs = { + .pre_enable = ptn3460_pre_enable, + .enable = ptn3460_enable, + .disable = ptn3460_disable, + .post_disable = ptn3460_post_disable, + .attach = ptn3460_bridge_attach, +}; + +static int ptn3460_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ptn3460_bridge *ptn_bridge; + struct device_node *endpoint, *panel_node; + int ret; + + ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL); + if (!ptn_bridge) { + return -ENOMEM; + } + + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + ptn_bridge->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!ptn_bridge->panel) + return -EPROBE_DEFER; + } + } + + ptn_bridge->client = client; + + ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown", + GPIOD_OUT_HIGH); + if (IS_ERR(ptn_bridge->gpio_pd_n)) { + ret = PTR_ERR(ptn_bridge->gpio_pd_n); + dev_err(dev, "cannot get gpio_pd_n %d\n", ret); + return ret; + } + + /* + * Request the reset pin low to avoid the bridge being + * initialized prematurely + */ + ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(ptn_bridge->gpio_rst_n)) { + ret = PTR_ERR(ptn_bridge->gpio_rst_n); + DRM_ERROR("cannot get gpio_rst_n %d\n", ret); + return ret; + } + + ret = of_property_read_u32(dev->of_node, "edid-emulation", + &ptn_bridge->edid_emulation); + if (ret) { + dev_err(dev, "Can't read EDID emulation value\n"); + return ret; + } + + ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs; + ptn_bridge->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&ptn_bridge->bridge); + if (ret) { + DRM_ERROR("Failed to add bridge\n"); + return ret; + } + + i2c_set_clientdata(client, ptn_bridge); + + return 0; +} + +static int ptn3460_remove(struct i2c_client *client) +{ + struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client); + + drm_bridge_remove(&ptn_bridge->bridge); + + return 0; +} + +static const struct i2c_device_id ptn3460_i2c_table[] = { + {"ptn3460", 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table); + +static const struct of_device_id ptn3460_match[] = { + { .compatible = 
"nxp,ptn3460" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ptn3460_match); + +static struct i2c_driver ptn3460_driver = { + .id_table = ptn3460_i2c_table, + .probe = ptn3460_probe, + .remove = ptn3460_remove, + .driver = { + .name = "nxp,ptn3460", + .owner = THIS_MODULE, + .of_match_table = ptn3460_match, + }, +}; +module_i2c_driver(ptn3460_driver); + +MODULE_AUTHOR("Sean Paul "); +MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c new file mode 100644 index 000000000..1a6607beb --- /dev/null +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -0,0 +1,679 @@ +/* + * Parade PS8622 eDP/LVDS bridge driver + * + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "drmP.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_atomic_helper.h" + +/* Brightness scale on the Parade chip */ +#define PS8622_MAX_BRIGHTNESS 0xff + +/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */ +#define PS8622_POWER_RISE_T1_MIN_US 10 +#define PS8622_POWER_RISE_T1_MAX_US 10000 +#define PS8622_RST_HIGH_T2_MIN_US 3000 +#define PS8622_RST_HIGH_T2_MAX_US 30000 +#define PS8622_PWMO_END_T12_MS 200 +#define PS8622_POWER_FALL_T16_MAX_US 10000 +#define PS8622_POWER_OFF_T17_MS 500 + +#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \ + (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US)) +#error "T2.min + T1.max must be less than T2.max + T1.min" +#endif + +struct ps8622_bridge { + struct drm_connector connector; + struct i2c_client *client; + struct drm_bridge bridge; + struct drm_panel *panel; + struct regulator *v12; + struct backlight_device *bl; + + struct gpio_desc *gpio_slp; + struct gpio_desc *gpio_rst; + + u32 max_lane_count; + u32 lane_count; + + bool enabled; +}; + +static inline struct ps8622_bridge * + bridge_to_ps8622(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ps8622_bridge, bridge); +} + +static inline struct ps8622_bridge * + connector_to_ps8622(struct drm_connector *connector) +{ + return container_of(connector, struct ps8622_bridge, connector); +} + +static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val) +{ + int ret; + struct i2c_adapter *adap = client->adapter; + struct i2c_msg msg; + u8 data[] = {reg, val}; + + msg.addr = client->addr + page; + msg.flags = 0; + msg.len = sizeof(data); + msg.buf = data; + + ret = i2c_transfer(adap, &msg, 1); + if (ret != 1) + pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n", + client->addr + page, reg, val, ret); + return !(ret == 1); +} + +static int ps8622_send_config(struct ps8622_bridge *ps8622) +{ + struct i2c_client *cl = ps8622->client; + int err = 0; + + /* HPD low */ + err = ps8622_set(cl, 0x02, 0xa1, 0x01); + if (err) + goto error; + + /* SW setting: [1:0] SW output 1.2V voltage is lower to 96% */ + err = ps8622_set(cl, 0x04, 0x14, 
0x01); + if (err) + goto error; + + /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */ + err = ps8622_set(cl, 0x04, 0xe3, 0x20); + if (err) + goto error; + + /* [7] RCO SS enable */ + err = ps8622_set(cl, 0x04, 0xe2, 0x80); + if (err) + goto error; + + /* RPHY Setting + * [3:2] CDR tune wait cycle before measure for fine tune + * b00: 1us b01: 0.5us b10:2us, b11: 4us + */ + err = ps8622_set(cl, 0x04, 0x8a, 0x0c); + if (err) + goto error; + + /* [3] RFD always on */ + err = ps8622_set(cl, 0x04, 0x89, 0x08); + if (err) + goto error; + + /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */ + err = ps8622_set(cl, 0x04, 0x71, 0x2d); + if (err) + goto error; + + /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */ + err = ps8622_set(cl, 0x04, 0x7d, 0x07); + if (err) + goto error; + + /* [1:0] Fmin=+4bands */ + err = ps8622_set(cl, 0x04, 0x7b, 0x00); + if (err) + goto error; + + /* [7:5] DCO_FTRNG=+-40% */ + err = ps8622_set(cl, 0x04, 0x7a, 0xfd); + if (err) + goto error; + + /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */ + err = ps8622_set(cl, 0x04, 0xc0, 0x12); + if (err) + goto error; + + /* Gitune=-37% */ + err = ps8622_set(cl, 0x04, 0xc1, 0x92); + if (err) + goto error; + + /* Fbstep=100% */ + err = ps8622_set(cl, 0x04, 0xc2, 0x1c); + if (err) + goto error; + + /* [7] LOS signal disable */ + err = ps8622_set(cl, 0x04, 0x32, 0x80); + if (err) + goto error; + + /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */ + err = ps8622_set(cl, 0x04, 0x00, 0xb0); + if (err) + goto error; + + /* [7:6] Right-bar GPIO output strength is 8mA */ + err = ps8622_set(cl, 0x04, 0x15, 0x40); + if (err) + goto error; + + /* EQ Training State Machine Setting, RCO calibration start */ + err = ps8622_set(cl, 0x04, 0x54, 0x10); + if (err) + goto error; + + /* Logic settings; these need more than 10 I2C commands */ + /* [4:0] MAX_LANE_COUNT set to max supported lanes */ + err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count); + if (err) + goto error; + + /* [4:0] LANE_COUNT_SET set to chosen lane count */ + err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count); + if (err) + goto error; + + err = ps8622_set(cl, 0x00, 0x52, 0x20); + if (err) + goto error; + + /* HPD CP toggle enable */ + err = ps8622_set(cl, 0x00, 0xf1, 0x03); + if (err) + goto error; + + err = ps8622_set(cl, 0x00, 0x62, 0x41); + if (err) + goto error; + + /* Counter number, add 1ms counter delay */ + err = ps8622_set(cl, 0x00, 0xf6, 0x01); + if (err) + goto error; + + /* [6]PWM function control by DPCD0040f[7], default is PWM block */ + err = ps8622_set(cl, 0x00, 0x77, 0x06); + if (err) + goto error; + + /* 04h Adjust VTotal tolerance to fix the 30Hz no display issue */ + err = ps8622_set(cl, 0x00, 0x4c, 0x04); + if (err) + goto error; + + /* DPCD00400='h00, Parade OUI ='h001cf8 */ + err = ps8622_set(cl, 0x01, 0xc0, 0x00); + if (err) + goto error; + + /* DPCD00401='h1c */ + err = ps8622_set(cl, 0x01, 0xc1, 0x1c); + if (err) + goto error; + + /* DPCD00402='hf8 */ + err = ps8622_set(cl, 0x01, 0xc2, 0xf8); + if (err) + goto error; + + /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */ + err = ps8622_set(cl, 0x01, 0xc3, 0x44); + if (err) + goto error; + + /* DPCD404 */ + err = ps8622_set(cl, 0x01, 0xc4, 0x32); + if (err) + goto error; + + /* DPCD405 */ + err = ps8622_set(cl, 0x01, 0xc5, 0x53); + if (err) + goto error; + + /* DPCD406 */ + err = ps8622_set(cl, 0x01, 0xc6, 0x4c); + if (err) + goto error; + + /* DPCD407 */ + err = ps8622_set(cl, 0x01, 0xc7, 0x56); + if (err) + goto error; + + /* DPCD408 */ +
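The DPCD403~408 sequence here (the final 0x408 byte follows just below) is less opaque once the six data bytes are read as ASCII: they spell the device string named in the comment. A quick hosted-C check:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* bytes ps8622_send_config() writes to DPCD 0x403..0x408 */
	const char name[] = { 0x44, 0x32, 0x53, 0x4c, 0x56, 0x35, 0 };

	assert(strcmp(name, "D2SLV5") == 0);	/* 'h4432534c5635 */
	return 0;
}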
err = ps8622_set(cl, 0x01, 0xc8, 0x35); + if (err) + goto error; + + /* DPCD40A, Initial Code major revision '01' */ + err = ps8622_set(cl, 0x01, 0xca, 0x01); + if (err) + goto error; + + /* DPCD40B, Initial Code minor revision '05' */ + err = ps8622_set(cl, 0x01, 0xcb, 0x05); + if (err) + goto error; + + + if (ps8622->bl) { + /* DPCD720, internal PWM */ + err = ps8622_set(cl, 0x01, 0xa5, 0xa0); + if (err) + goto error; + + /* FFh for 100% brightness, 0h for 0% brightness */ + err = ps8622_set(cl, 0x01, 0xa7, + ps8622->bl->props.brightness); + if (err) + goto error; + } else { + /* DPCD720, external PWM */ + err = ps8622_set(cl, 0x01, 0xa5, 0x80); + if (err) + goto error; + } + + /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */ + err = ps8622_set(cl, 0x01, 0xcc, 0x13); + if (err) + goto error; + + /* Enable SSC set by register */ + err = ps8622_set(cl, 0x02, 0xb1, 0x20); + if (err) + goto error; + + /* Set SSC enabled and +/-1% central spreading */ + err = ps8622_set(cl, 0x04, 0x10, 0x16); + if (err) + goto error; + + /* Logic end */ + /* MPU Clock source: LC => RCO */ + err = ps8622_set(cl, 0x04, 0x59, 0x60); + if (err) + goto error; + + /* LC -> RCO */ + err = ps8622_set(cl, 0x04, 0x54, 0x14); + if (err) + goto error; + + /* HPD high */ + err = ps8622_set(cl, 0x02, 0xa1, 0x91); + +error: + return err ? -EIO : 0; +} + +static int ps8622_backlight_update(struct backlight_device *bl) +{ + struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev); + int ret, brightness = bl->props.brightness; + + if (bl->props.power != FB_BLANK_UNBLANK || + bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) + brightness = 0; + + if (!ps8622->enabled) + return -EINVAL; + + ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness); + + return ret; +} + +static const struct backlight_ops ps8622_backlight_ops = { + .update_status = ps8622_backlight_update, +}; + +static void ps8622_pre_enable(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + int ret; + + if (ps8622->enabled) + return; + + gpiod_set_value(ps8622->gpio_rst, 0); + + if (ps8622->v12) { + ret = regulator_enable(ps8622->v12); + if (ret) + DRM_ERROR("failed to enable ps8622->v12\n"); + } + + if (drm_panel_prepare(ps8622->panel)) { + DRM_ERROR("failed to prepare panel\n"); + return; + } + + gpiod_set_value(ps8622->gpio_slp, 1); + + /* + * T1 is the range of time that it takes for the power to rise after we + * enable the lcd/ps8622 fet. T2 is the range of time in which the + * data sheet specifies we should deassert the reset pin. + * + * If it takes T1.max for the power to rise, we need to wait at least + * T2.min before deasserting the reset pin. If it takes T1.min for the + * power to rise, we need to wait at most T2.max before deasserting the + * reset pin.
+ */ + usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US, + PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US); + + gpiod_set_value(ps8622->gpio_rst, 1); + + /* wait 20ms after RST high */ + usleep_range(20000, 30000); + + ret = ps8622_send_config(ps8622); + if (ret) { + DRM_ERROR("Failed to send config to bridge (%d)\n", ret); + return; + } + + ps8622->enabled = true; +} + +static void ps8622_enable(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + + if (drm_panel_enable(ps8622->panel)) { + DRM_ERROR("failed to enable panel\n"); + return; + } +} + +static void ps8622_disable(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + + if (drm_panel_disable(ps8622->panel)) { + DRM_ERROR("failed to disable panel\n"); + return; + } + msleep(PS8622_PWMO_END_T12_MS); +} + +static void ps8622_post_disable(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + + if (!ps8622->enabled) + return; + + ps8622->enabled = false; + + /* + * This doesn't matter if the regulators are turned off, but something + * else might keep them on. In that case, we want to assert the slp gpio + * to lower power. + */ + gpiod_set_value(ps8622->gpio_slp, 0); + + if (drm_panel_unprepare(ps8622->panel)) { + DRM_ERROR("failed to unprepare panel\n"); + return; + } + + if (ps8622->v12) + regulator_disable(ps8622->v12); + + /* + * Sleep for at least the amount of time that it takes the power rail to + * fall to prevent asserting the rst gpio from doing anything. + */ + usleep_range(PS8622_POWER_FALL_T16_MAX_US, + 2 * PS8622_POWER_FALL_T16_MAX_US); + gpiod_set_value(ps8622->gpio_rst, 0); + + msleep(PS8622_POWER_OFF_T17_MS); +} + +static int ps8622_get_modes(struct drm_connector *connector) +{ + struct ps8622_bridge *ps8622; + + ps8622 = connector_to_ps8622(connector); + + return drm_panel_get_modes(ps8622->panel); +} + +static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) +{ + struct ps8622_bridge *ps8622; + + ps8622 = connector_to_ps8622(connector); + + return ps8622->bridge.encoder; +} + +static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { + .get_modes = ps8622_get_modes, + .best_encoder = ps8622_best_encoder, +}; + +static enum drm_connector_status ps8622_detect(struct drm_connector *connector, + bool force) +{ + return connector_status_connected; +} + +static void ps8622_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs ps8622_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = ps8622_detect, + .destroy = ps8622_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ps8622_attach(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(bridge->dev, &ps8622->connector, + &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + drm_connector_helper_add(&ps8622->connector, + 
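Substituting the datasheet constants into the usleep_range() call above makes the window concrete, and shows what the compile-time #if near the top of the file protects: the lower bound (T2.min + T1.max) must stay below the upper bound (T2.max + T1.min), or the range is empty. A worked check in hosted C:

#include <assert.h>

#define PS8622_POWER_RISE_T1_MIN_US	10
#define PS8622_POWER_RISE_T1_MAX_US	10000
#define PS8622_RST_HIGH_T2_MIN_US	3000
#define PS8622_RST_HIGH_T2_MAX_US	30000

int main(void)
{
	unsigned lo = PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US;
	unsigned hi = PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US;

	/* the driver ends up calling usleep_range(13000, 30010) */
	assert(lo == 13000 && hi == 30010 && lo < hi);
	return 0;
}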
&ps8622_connector_helper_funcs); + drm_connector_register(&ps8622->connector); + drm_mode_connector_attach_encoder(&ps8622->connector, + bridge->encoder); + + if (ps8622->panel) + drm_panel_attach(ps8622->panel, &ps8622->connector); + + drm_helper_hpd_irq_event(ps8622->connector.dev); + + return ret; +} + +static const struct drm_bridge_funcs ps8622_bridge_funcs = { + .pre_enable = ps8622_pre_enable, + .enable = ps8622_enable, + .disable = ps8622_disable, + .post_disable = ps8622_post_disable, + .attach = ps8622_attach, +}; + +static const struct of_device_id ps8622_devices[] = { + {.compatible = "parade,ps8622",}, + {.compatible = "parade,ps8625",}, + {} +}; +MODULE_DEVICE_TABLE(of, ps8622_devices); + +static int ps8622_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *endpoint, *panel_node; + struct ps8622_bridge *ps8622; + int ret; + + ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL); + if (!ps8622) + return -ENOMEM; + + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + ps8622->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!ps8622->panel) + return -EPROBE_DEFER; + } + } + + ps8622->client = client; + + ps8622->v12 = devm_regulator_get(dev, "vdd12"); + if (IS_ERR(ps8622->v12)) { + dev_info(dev, "no 1.2v regulator found for PS8622\n"); + ps8622->v12 = NULL; + } + + ps8622->gpio_slp = devm_gpiod_get(dev, "sleep", GPIOD_OUT_HIGH); + if (IS_ERR(ps8622->gpio_slp)) { + ret = PTR_ERR(ps8622->gpio_slp); + dev_err(dev, "cannot get gpio_slp %d\n", ret); + return ret; + } + + /* + * Assert the reset pin high to avoid the bridge being + * initialized prematurely + */ + ps8622->gpio_rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ps8622->gpio_rst)) { + ret = PTR_ERR(ps8622->gpio_rst); + dev_err(dev, "cannot get gpio_rst %d\n", ret); + return ret; + } + + ps8622->max_lane_count = id->driver_data; + + if (of_property_read_u32(dev->of_node, "lane-count", + &ps8622->lane_count)) { + ps8622->lane_count = ps8622->max_lane_count; + } else if (ps8622->lane_count > ps8622->max_lane_count) { + dev_info(dev, "lane-count property is too high, " + "using max_lane_count\n"); + ps8622->lane_count = ps8622->max_lane_count; + } + + if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) { + ps8622->bl = backlight_device_register("ps8622-backlight", + dev, ps8622, &ps8622_backlight_ops, + NULL); + if (IS_ERR(ps8622->bl)) { + DRM_ERROR("failed to register backlight\n"); + ret = PTR_ERR(ps8622->bl); + ps8622->bl = NULL; + return ret; + } + ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS; + ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS; + } + + ps8622->bridge.funcs = &ps8622_bridge_funcs; + ps8622->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&ps8622->bridge); + if (ret) { + DRM_ERROR("Failed to add bridge\n"); + return ret; + } + + i2c_set_clientdata(client, ps8622); + + return 0; +} + +static int ps8622_remove(struct i2c_client *client) +{ + struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); + + if (ps8622->bl) + backlight_device_unregister(ps8622->bl); + + drm_bridge_remove(&ps8622->bridge); + + return 0; +} + +static const struct i2c_device_id ps8622_i2c_table[] = { + /* Device type, max_lane_count */ + {"ps8622", 1}, + {"ps8625", 2}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table); + +static struct i2c_driver ps8622_driver = { + .id_table = 
ps8622_i2c_table, + .probe = ps8622_probe, + .remove = ps8622_remove, + .driver = { + .name = "ps8622", + .owner = THIS_MODULE, + .of_match_table = ps8622_devices, + }, +}; +module_i2c_driver(ps8622_driver); + +MODULE_AUTHOR("Vincent Palatin "); +MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c deleted file mode 100644 index 1a6607beb..000000000 --- a/drivers/gpu/drm/bridge/ps8622.c +++ /dev/null @@ -1,679 +0,0 @@ -/* - * Parade PS8622 eDP/LVDS bridge driver - * - * Copyright (C) 2014 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "drmP.h" -#include "drm_crtc.h" -#include "drm_crtc_helper.h" -#include "drm_atomic_helper.h" - -/* Brightness scale on the Parade chip */ -#define PS8622_MAX_BRIGHTNESS 0xff - -/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */ -#define PS8622_POWER_RISE_T1_MIN_US 10 -#define PS8622_POWER_RISE_T1_MAX_US 10000 -#define PS8622_RST_HIGH_T2_MIN_US 3000 -#define PS8622_RST_HIGH_T2_MAX_US 30000 -#define PS8622_PWMO_END_T12_MS 200 -#define PS8622_POWER_FALL_T16_MAX_US 10000 -#define PS8622_POWER_OFF_T17_MS 500 - -#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \ - (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US)) -#error "T2.min + T1.max must be less than T2.max + T1.min" -#endif - -struct ps8622_bridge { - struct drm_connector connector; - struct i2c_client *client; - struct drm_bridge bridge; - struct drm_panel *panel; - struct regulator *v12; - struct backlight_device *bl; - - struct gpio_desc *gpio_slp; - struct gpio_desc *gpio_rst; - - u32 max_lane_count; - u32 lane_count; - - bool enabled; -}; - -static inline struct ps8622_bridge * - bridge_to_ps8622(struct drm_bridge *bridge) -{ - return container_of(bridge, struct ps8622_bridge, bridge); -} - -static inline struct ps8622_bridge * - connector_to_ps8622(struct drm_connector *connector) -{ - return container_of(connector, struct ps8622_bridge, connector); -} - -static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val) -{ - int ret; - struct i2c_adapter *adap = client->adapter; - struct i2c_msg msg; - u8 data[] = {reg, val}; - - msg.addr = client->addr + page; - msg.flags = 0; - msg.len = sizeof(data); - msg.buf = data; - - ret = i2c_transfer(adap, &msg, 1); - if (ret != 1) - pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n", - client->addr + page, reg, val, ret); - return !(ret == 1); -} - -static int ps8622_send_config(struct ps8622_bridge *ps8622) -{ - struct i2c_client *cl = ps8622->client; - int err = 0; - - /* HPD low */ - err = ps8622_set(cl, 0x02, 0xa1, 0x01); - if (err) - goto error; - - /* SW setting: [1:0] SW output 1.2V voltage is lower to 96% */ - err = ps8622_set(cl, 0x04, 0x14, 0x01); - if (err) - goto error; - - /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */ - err = ps8622_set(cl, 0x04, 0xe3, 0x20); - if 
(err) - goto error; - - /* [7] RCO SS enable */ - err = ps8622_set(cl, 0x04, 0xe2, 0x80); - if (err) - goto error; - - /* RPHY Setting - * [3:2] CDR tune wait cycle before measure for fine tune - * b00: 1us b01: 0.5us b10:2us, b11: 4us - */ - err = ps8622_set(cl, 0x04, 0x8a, 0x0c); - if (err) - goto error; - - /* [3] RFD always on */ - err = ps8622_set(cl, 0x04, 0x89, 0x08); - if (err) - goto error; - - /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */ - err = ps8622_set(cl, 0x04, 0x71, 0x2d); - if (err) - goto error; - - /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */ - err = ps8622_set(cl, 0x04, 0x7d, 0x07); - if (err) - goto error; - - /* [1:0] Fmin=+4bands */ - err = ps8622_set(cl, 0x04, 0x7b, 0x00); - if (err) - goto error; - - /* [7:5] DCO_FTRNG=+-40% */ - err = ps8622_set(cl, 0x04, 0x7a, 0xfd); - if (err) - goto error; - - /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */ - err = ps8622_set(cl, 0x04, 0xc0, 0x12); - if (err) - goto error; - - /* Gitune=-37% */ - err = ps8622_set(cl, 0x04, 0xc1, 0x92); - if (err) - goto error; - - /* Fbstep=100% */ - err = ps8622_set(cl, 0x04, 0xc2, 0x1c); - if (err) - goto error; - - /* [7] LOS signal disable */ - err = ps8622_set(cl, 0x04, 0x32, 0x80); - if (err) - goto error; - - /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */ - err = ps8622_set(cl, 0x04, 0x00, 0xb0); - if (err) - goto error; - - /* [7:6] Right-bar GPIO output strength is 8mA */ - err = ps8622_set(cl, 0x04, 0x15, 0x40); - if (err) - goto error; - - /* EQ Training State Machine Setting, RCO calibration start */ - err = ps8622_set(cl, 0x04, 0x54, 0x10); - if (err) - goto error; - - /* Logic, needs more than 10 I2C command */ - /* [4:0] MAX_LANE_COUNT set to max supported lanes */ - err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count); - if (err) - goto error; - - /* [4:0] LANE_COUNT_SET set to chosen lane count */ - err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count); - if (err) - goto error; - - err = ps8622_set(cl, 0x00, 0x52, 0x20); - if (err) - goto error; - - /* HPD CP toggle enable */ - err = ps8622_set(cl, 0x00, 0xf1, 0x03); - if (err) - goto error; - - err = ps8622_set(cl, 0x00, 0x62, 0x41); - if (err) - goto error; - - /* Counter number, add 1ms counter delay */ - err = ps8622_set(cl, 0x00, 0xf6, 0x01); - if (err) - goto error; - - /* [6]PWM function control by DPCD0040f[7], default is PWM block */ - err = ps8622_set(cl, 0x00, 0x77, 0x06); - if (err) - goto error; - - /* 04h Adjust VTotal toleranceto fix the 30Hz no display issue */ - err = ps8622_set(cl, 0x00, 0x4c, 0x04); - if (err) - goto error; - - /* DPCD00400='h00, Parade OUI ='h001cf8 */ - err = ps8622_set(cl, 0x01, 0xc0, 0x00); - if (err) - goto error; - - /* DPCD00401='h1c */ - err = ps8622_set(cl, 0x01, 0xc1, 0x1c); - if (err) - goto error; - - /* DPCD00402='hf8 */ - err = ps8622_set(cl, 0x01, 0xc2, 0xf8); - if (err) - goto error; - - /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */ - err = ps8622_set(cl, 0x01, 0xc3, 0x44); - if (err) - goto error; - - /* DPCD404 */ - err = ps8622_set(cl, 0x01, 0xc4, 0x32); - if (err) - goto error; - - /* DPCD405 */ - err = ps8622_set(cl, 0x01, 0xc5, 0x53); - if (err) - goto error; - - /* DPCD406 */ - err = ps8622_set(cl, 0x01, 0xc6, 0x4c); - if (err) - goto error; - - /* DPCD407 */ - err = ps8622_set(cl, 0x01, 0xc7, 0x56); - if (err) - goto error; - - /* DPCD408 */ - err = ps8622_set(cl, 0x01, 0xc8, 0x35); - if (err) - goto error; - - /* DPCD40A, Initial Code major revision '01' */ - err = ps8622_set(cl, 
0x01, 0xca, 0x01); - if (err) - goto error; - - /* DPCD40B, Initial Code minor revision '05' */ - err = ps8622_set(cl, 0x01, 0xcb, 0x05); - if (err) - goto error; - - - if (ps8622->bl) { - /* DPCD720, internal PWM */ - err = ps8622_set(cl, 0x01, 0xa5, 0xa0); - if (err) - goto error; - - /* FFh for 100% brightness, 0h for 0% brightness */ - err = ps8622_set(cl, 0x01, 0xa7, - ps8622->bl->props.brightness); - if (err) - goto error; - } else { - /* DPCD720, external PWM */ - err = ps8622_set(cl, 0x01, 0xa5, 0x80); - if (err) - goto error; - } - - /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */ - err = ps8622_set(cl, 0x01, 0xcc, 0x13); - if (err) - goto error; - - /* Enable SSC set by register */ - err = ps8622_set(cl, 0x02, 0xb1, 0x20); - if (err) - goto error; - - /* Set SSC enabled and +/-1% central spreading */ - err = ps8622_set(cl, 0x04, 0x10, 0x16); - if (err) - goto error; - - /* Logic end */ - /* MPU Clock source: LC => RCO */ - err = ps8622_set(cl, 0x04, 0x59, 0x60); - if (err) - goto error; - - /* LC -> RCO */ - err = ps8622_set(cl, 0x04, 0x54, 0x14); - if (err) - goto error; - - /* HPD high */ - err = ps8622_set(cl, 0x02, 0xa1, 0x91); - -error: - return err ? -EIO : 0; -} - -static int ps8622_backlight_update(struct backlight_device *bl) -{ - struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev); - int ret, brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) - brightness = 0; - - if (!ps8622->enabled) - return -EINVAL; - - ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness); - - return ret; -} - -static const struct backlight_ops ps8622_backlight_ops = { - .update_status = ps8622_backlight_update, -}; - -static void ps8622_pre_enable(struct drm_bridge *bridge) -{ - struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); - int ret; - - if (ps8622->enabled) - return; - - gpiod_set_value(ps8622->gpio_rst, 0); - - if (ps8622->v12) { - ret = regulator_enable(ps8622->v12); - if (ret) - DRM_ERROR("fails to enable ps8622->v12"); - } - - if (drm_panel_prepare(ps8622->panel)) { - DRM_ERROR("failed to prepare panel\n"); - return; - } - - gpiod_set_value(ps8622->gpio_slp, 1); - - /* - * T1 is the range of time that it takes for the power to rise after we - * enable the lcd/ps8622 fet. T2 is the range of time in which the - * data sheet specifies we should deassert the reset pin. - * - * If it takes T1.max for the power to rise, we need to wait atleast - * T2.min before deasserting the reset pin. If it takes T1.min for the - * power to rise, we need to wait at most T2.max before deasserting the - * reset pin. 
- */ - usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US, - PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US); - - gpiod_set_value(ps8622->gpio_rst, 1); - - /* wait 20ms after RST high */ - usleep_range(20000, 30000); - - ret = ps8622_send_config(ps8622); - if (ret) { - DRM_ERROR("Failed to send config to bridge (%d)\n", ret); - return; - } - - ps8622->enabled = true; -} - -static void ps8622_enable(struct drm_bridge *bridge) -{ - struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); - - if (drm_panel_enable(ps8622->panel)) { - DRM_ERROR("failed to enable panel\n"); - return; - } -} - -static void ps8622_disable(struct drm_bridge *bridge) -{ - struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); - - if (drm_panel_disable(ps8622->panel)) { - DRM_ERROR("failed to disable panel\n"); - return; - } - msleep(PS8622_PWMO_END_T12_MS); -} - -static void ps8622_post_disable(struct drm_bridge *bridge) -{ - struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); - - if (!ps8622->enabled) - return; - - ps8622->enabled = false; - - /* - * This doesn't matter if the regulators are turned off, but something - * else might keep them on. In that case, we want to assert the slp gpio - * to lower power. - */ - gpiod_set_value(ps8622->gpio_slp, 0); - - if (drm_panel_unprepare(ps8622->panel)) { - DRM_ERROR("failed to unprepare panel\n"); - return; - } - - if (ps8622->v12) - regulator_disable(ps8622->v12); - - /* - * Sleep for at least the amount of time that it takes the power rail to - * fall to prevent asserting the rst gpio from doing anything. - */ - usleep_range(PS8622_POWER_FALL_T16_MAX_US, - 2 * PS8622_POWER_FALL_T16_MAX_US); - gpiod_set_value(ps8622->gpio_rst, 0); - - msleep(PS8622_POWER_OFF_T17_MS); -} - -static int ps8622_get_modes(struct drm_connector *connector) -{ - struct ps8622_bridge *ps8622; - - ps8622 = connector_to_ps8622(connector); - - return drm_panel_get_modes(ps8622->panel); -} - -static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) -{ - struct ps8622_bridge *ps8622; - - ps8622 = connector_to_ps8622(connector); - - return ps8622->bridge.encoder; -} - -static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { - .get_modes = ps8622_get_modes, - .best_encoder = ps8622_best_encoder, -}; - -static enum drm_connector_status ps8622_detect(struct drm_connector *connector, - bool force) -{ - return connector_status_connected; -} - -static void ps8622_connector_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); -} - -static const struct drm_connector_funcs ps8622_connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = ps8622_detect, - .destroy = ps8622_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ps8622_attach(struct drm_bridge *bridge) -{ - struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); - int ret; - - if (!bridge->encoder) { - DRM_ERROR("Parent encoder object not found"); - return -ENODEV; - } - - ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD; - ret = drm_connector_init(bridge->dev, &ps8622->connector, - &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - return ret; - } - drm_connector_helper_add(&ps8622->connector, - 
&ps8622_connector_helper_funcs); - drm_connector_register(&ps8622->connector); - drm_mode_connector_attach_encoder(&ps8622->connector, - bridge->encoder); - - if (ps8622->panel) - drm_panel_attach(ps8622->panel, &ps8622->connector); - - drm_helper_hpd_irq_event(ps8622->connector.dev); - - return ret; -} - -static const struct drm_bridge_funcs ps8622_bridge_funcs = { - .pre_enable = ps8622_pre_enable, - .enable = ps8622_enable, - .disable = ps8622_disable, - .post_disable = ps8622_post_disable, - .attach = ps8622_attach, -}; - -static const struct of_device_id ps8622_devices[] = { - {.compatible = "parade,ps8622",}, - {.compatible = "parade,ps8625",}, - {} -}; -MODULE_DEVICE_TABLE(of, ps8622_devices); - -static int ps8622_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct device *dev = &client->dev; - struct device_node *endpoint, *panel_node; - struct ps8622_bridge *ps8622; - int ret; - - ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL); - if (!ps8622) - return -ENOMEM; - - endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); - if (endpoint) { - panel_node = of_graph_get_remote_port_parent(endpoint); - if (panel_node) { - ps8622->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); - if (!ps8622->panel) - return -EPROBE_DEFER; - } - } - - ps8622->client = client; - - ps8622->v12 = devm_regulator_get(dev, "vdd12"); - if (IS_ERR(ps8622->v12)) { - dev_info(dev, "no 1.2v regulator found for PS8622\n"); - ps8622->v12 = NULL; - } - - ps8622->gpio_slp = devm_gpiod_get(dev, "sleep", GPIOD_OUT_HIGH); - if (IS_ERR(ps8622->gpio_slp)) { - ret = PTR_ERR(ps8622->gpio_slp); - dev_err(dev, "cannot get gpio_slp %d\n", ret); - return ret; - } - - /* - * Assert the reset pin high to avoid the bridge being - * initialized prematurely - */ - ps8622->gpio_rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ps8622->gpio_rst)) { - ret = PTR_ERR(ps8622->gpio_rst); - dev_err(dev, "cannot get gpio_rst %d\n", ret); - return ret; - } - - ps8622->max_lane_count = id->driver_data; - - if (of_property_read_u32(dev->of_node, "lane-count", - &ps8622->lane_count)) { - ps8622->lane_count = ps8622->max_lane_count; - } else if (ps8622->lane_count > ps8622->max_lane_count) { - dev_info(dev, "lane-count property is too high," - "using max_lane_count\n"); - ps8622->lane_count = ps8622->max_lane_count; - } - - if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) { - ps8622->bl = backlight_device_register("ps8622-backlight", - dev, ps8622, &ps8622_backlight_ops, - NULL); - if (IS_ERR(ps8622->bl)) { - DRM_ERROR("failed to register backlight\n"); - ret = PTR_ERR(ps8622->bl); - ps8622->bl = NULL; - return ret; - } - ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS; - ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS; - } - - ps8622->bridge.funcs = &ps8622_bridge_funcs; - ps8622->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&ps8622->bridge); - if (ret) { - DRM_ERROR("Failed to add bridge\n"); - return ret; - } - - i2c_set_clientdata(client, ps8622); - - return 0; -} - -static int ps8622_remove(struct i2c_client *client) -{ - struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); - - if (ps8622->bl) - backlight_device_unregister(ps8622->bl); - - drm_bridge_remove(&ps8622->bridge); - - return 0; -} - -static const struct i2c_device_id ps8622_i2c_table[] = { - /* Device type, max_lane_count */ - {"ps8622", 1}, - {"ps8625", 2}, - {}, -}; -MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table); - -static struct i2c_driver ps8622_driver = { - .id_table = 
ps8622_i2c_table, - .probe = ps8622_probe, - .remove = ps8622_remove, - .driver = { - .name = "ps8622", - .owner = THIS_MODULE, - .of_match_table = ps8622_devices, - }, -}; -module_i2c_driver(ps8622_driver); - -MODULE_AUTHOR("Vincent Palatin "); -MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c deleted file mode 100644 index 1b1bf2384..000000000 --- a/drivers/gpu/drm/bridge/ptn3460.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * NXP PTN3460 DP/LVDS bridge driver - * - * Copyright (C) 2013 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "drm_crtc.h" -#include "drm_crtc_helper.h" -#include "drm_atomic_helper.h" -#include "drm_edid.h" -#include "drmP.h" - -#define PTN3460_EDID_ADDR 0x0 -#define PTN3460_EDID_EMULATION_ADDR 0x84 -#define PTN3460_EDID_ENABLE_EMULATION 0 -#define PTN3460_EDID_EMULATION_SELECTION 1 -#define PTN3460_EDID_SRAM_LOAD_ADDR 0x85 - -struct ptn3460_bridge { - struct drm_connector connector; - struct i2c_client *client; - struct drm_bridge bridge; - struct edid *edid; - struct drm_panel *panel; - struct gpio_desc *gpio_pd_n; - struct gpio_desc *gpio_rst_n; - u32 edid_emulation; - bool enabled; -}; - -static inline struct ptn3460_bridge * - bridge_to_ptn3460(struct drm_bridge *bridge) -{ - return container_of(bridge, struct ptn3460_bridge, bridge); -} - -static inline struct ptn3460_bridge * - connector_to_ptn3460(struct drm_connector *connector) -{ - return container_of(connector, struct ptn3460_bridge, connector); -} - -static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, - u8 *buf, int len) -{ - int ret; - - ret = i2c_master_send(ptn_bridge->client, &addr, 1); - if (ret <= 0) { - DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret; - } - - ret = i2c_master_recv(ptn_bridge->client, buf, len); - if (ret <= 0) { - DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret); - return ret; - } - - return 0; -} - -static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr, - char val) -{ - int ret; - char buf[2]; - - buf[0] = addr; - buf[1] = val; - - ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf)); - if (ret <= 0) { - DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); - return ret; - } - - return 0; -} - -static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) -{ - int ret; - char val; - - /* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */ - ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR, - ptn_bridge->edid_emulation); - if (ret) { - DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret); - return ret; - } - - /* Enable EDID emulation and select the desired EDID */ - val = 1 << PTN3460_EDID_ENABLE_EMULATION | - ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION; - - ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val); - if (ret) { - DRM_ERROR("Failed to write EDID value, 
ret=%d\n", ret); - return ret; - } - - return 0; -} - -static void ptn3460_pre_enable(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); - int ret; - - if (ptn_bridge->enabled) - return; - - gpiod_set_value(ptn_bridge->gpio_pd_n, 1); - - gpiod_set_value(ptn_bridge->gpio_rst_n, 0); - usleep_range(10, 20); - gpiod_set_value(ptn_bridge->gpio_rst_n, 1); - - if (drm_panel_prepare(ptn_bridge->panel)) { - DRM_ERROR("failed to prepare panel\n"); - return; - } - - /* - * There's a bug in the PTN chip where it falsely asserts hotplug before - * it is fully functional. We're forced to wait for the maximum start up - * time specified in the chip's datasheet to make sure we're really up. - */ - msleep(90); - - ret = ptn3460_select_edid(ptn_bridge); - if (ret) - DRM_ERROR("Select EDID failed ret=%d\n", ret); - - ptn_bridge->enabled = true; -} - -static void ptn3460_enable(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); - - if (drm_panel_enable(ptn_bridge->panel)) { - DRM_ERROR("failed to enable panel\n"); - return; - } -} - -static void ptn3460_disable(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); - - if (!ptn_bridge->enabled) - return; - - ptn_bridge->enabled = false; - - if (drm_panel_disable(ptn_bridge->panel)) { - DRM_ERROR("failed to disable panel\n"); - return; - } - - gpiod_set_value(ptn_bridge->gpio_rst_n, 1); - gpiod_set_value(ptn_bridge->gpio_pd_n, 0); -} - -static void ptn3460_post_disable(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); - - if (drm_panel_unprepare(ptn_bridge->panel)) { - DRM_ERROR("failed to unprepare panel\n"); - return; - } -} - -static int ptn3460_get_modes(struct drm_connector *connector) -{ - struct ptn3460_bridge *ptn_bridge; - u8 *edid; - int ret, num_modes = 0; - bool power_off; - - ptn_bridge = connector_to_ptn3460(connector); - - if (ptn_bridge->edid) - return drm_add_edid_modes(connector, ptn_bridge->edid); - - power_off = !ptn_bridge->enabled; - ptn3460_pre_enable(&ptn_bridge->bridge); - - edid = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (!edid) { - DRM_ERROR("Failed to allocate EDID\n"); - return 0; - } - - ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid, - EDID_LENGTH); - if (ret) { - kfree(edid); - goto out; - } - - ptn_bridge->edid = (struct edid *)edid; - drm_mode_connector_update_edid_property(connector, ptn_bridge->edid); - - num_modes = drm_add_edid_modes(connector, ptn_bridge->edid); - -out: - if (power_off) - ptn3460_disable(&ptn_bridge->bridge); - - return num_modes; -} - -static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) -{ - struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - - return ptn_bridge->bridge.encoder; -} - -static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { - .get_modes = ptn3460_get_modes, - .best_encoder = ptn3460_best_encoder, -}; - -static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, - bool force) -{ - return connector_status_connected; -} - -static void ptn3460_connector_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); -} - -static struct drm_connector_funcs ptn3460_connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = ptn3460_detect, - .destroy = ptn3460_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - 
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ptn3460_bridge_attach(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); - int ret; - - if (!bridge->encoder) { - DRM_ERROR("Parent encoder object not found"); - return -ENODEV; - } - - ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD; - ret = drm_connector_init(bridge->dev, &ptn_bridge->connector, - &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - return ret; - } - drm_connector_helper_add(&ptn_bridge->connector, - &ptn3460_connector_helper_funcs); - drm_connector_register(&ptn_bridge->connector); - drm_mode_connector_attach_encoder(&ptn_bridge->connector, - bridge->encoder); - - if (ptn_bridge->panel) - drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector); - - drm_helper_hpd_irq_event(ptn_bridge->connector.dev); - - return ret; -} - -static struct drm_bridge_funcs ptn3460_bridge_funcs = { - .pre_enable = ptn3460_pre_enable, - .enable = ptn3460_enable, - .disable = ptn3460_disable, - .post_disable = ptn3460_post_disable, - .attach = ptn3460_bridge_attach, -}; - -static int ptn3460_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - struct device *dev = &client->dev; - struct ptn3460_bridge *ptn_bridge; - struct device_node *endpoint, *panel_node; - int ret; - - ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL); - if (!ptn_bridge) { - return -ENOMEM; - } - - endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); - if (endpoint) { - panel_node = of_graph_get_remote_port_parent(endpoint); - if (panel_node) { - ptn_bridge->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); - if (!ptn_bridge->panel) - return -EPROBE_DEFER; - } - } - - ptn_bridge->client = client; - - ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown", - GPIOD_OUT_HIGH); - if (IS_ERR(ptn_bridge->gpio_pd_n)) { - ret = PTR_ERR(ptn_bridge->gpio_pd_n); - dev_err(dev, "cannot get gpio_pd_n %d\n", ret); - return ret; - } - - /* - * Request the reset pin low to avoid the bridge being - * initialized prematurely - */ - ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset", - GPIOD_OUT_LOW); - if (IS_ERR(ptn_bridge->gpio_rst_n)) { - ret = PTR_ERR(ptn_bridge->gpio_rst_n); - DRM_ERROR("cannot get gpio_rst_n %d\n", ret); - return ret; - } - - ret = of_property_read_u32(dev->of_node, "edid-emulation", - &ptn_bridge->edid_emulation); - if (ret) { - dev_err(dev, "Can't read EDID emulation value\n"); - return ret; - } - - ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs; - ptn_bridge->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&ptn_bridge->bridge); - if (ret) { - DRM_ERROR("Failed to add bridge\n"); - return ret; - } - - i2c_set_clientdata(client, ptn_bridge); - - return 0; -} - -static int ptn3460_remove(struct i2c_client *client) -{ - struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client); - - drm_bridge_remove(&ptn_bridge->bridge); - - return 0; -} - -static const struct i2c_device_id ptn3460_i2c_table[] = { - {"ptn3460", 0}, - {}, -}; -MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table); - -static const struct of_device_id ptn3460_match[] = { - { .compatible = "nxp,ptn3460" }, - {}, -}; -MODULE_DEVICE_TABLE(of, ptn3460_match); - -static struct i2c_driver ptn3460_driver = { - .id_table = ptn3460_i2c_table, - .probe = ptn3460_probe, - .remove = ptn3460_remove, - .driver = 
{ - .name = "nxp,ptn3460", - .owner = THIS_MODULE, - .of_match_table = ptn3460_match, - }, -}; -module_i2c_driver(ptn3460_driver); - -MODULE_AUTHOR("Sean Paul "); -MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index b91400329..b1619e29a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev) if (cdev->mode_info.gfbdev) { console_lock(); - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1); + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1); console_unlock(); } @@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev) if (cdev->mode_info.gfbdev) { console_lock(); - fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0); + drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0); console_unlock(); } diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 13ddf1c4b..589103bcc 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct cirrus_fbdev *afbdev = info->par; - sys_fillrect(info, rect); + drm_fb_helper_sys_fillrect(info, rect); cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width, rect->height); } @@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct cirrus_fbdev *afbdev = info->par; - sys_copyarea(info, area); + drm_fb_helper_sys_copyarea(info, area); cirrus_dirty_update(afbdev, area->dx, area->dy, area->width, area->height); } @@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info, const struct fb_image *image) { struct cirrus_fbdev *afbdev = info->par; - sys_imageblit(info, image); + drm_fb_helper_sys_imageblit(info, image); cirrus_dirty_update(afbdev, image->dx, image->dy, image->width, image->height); } @@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper, { struct cirrus_fbdev *gfbdev = container_of(helper, struct cirrus_fbdev, helper); - struct drm_device *dev = gfbdev->helper.dev; struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; - struct device *device = &dev->pdev->dev; void *sysram; struct drm_gem_object *gobj = NULL; struct cirrus_bo *bo = NULL; @@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper, if (!sysram) return -ENOMEM; - info = framebuffer_alloc(0, device); - if (info == NULL) - return -ENOMEM; + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); info->par = gfbdev; @@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper, /* setup helper */ gfbdev->helper.fb = fb; - gfbdev->helper.fbdev = info; strcpy(info->fix.id, "cirrusdrmfb"); - info->flags = FBINFO_DEFAULT; info->fbops = &cirrusfb_ops; @@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ - info->apertures = alloc_apertures(1); - if (!info->apertures) { - ret = -ENOMEM; - goto out_iounmap; - } info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base; info->apertures->ranges[0].size = cdev->mc.vram_size; @@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper, 
info->fix.mmio_start = 0; info->fix.mmio_len = 0; - ret = fb_alloc_cmap(&info->cmap, 256, 0); - if (ret) { - DRM_ERROR("%s: can't allocate color map\n", info->fix.id); - ret = -ENOMEM; - goto out_iounmap; - } - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start); DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len); @@ -260,24 +244,15 @@ DRM_INFO(" pitch is %d\n", fb->pitches[0]); return 0; -out_iounmap: - return ret; } static int cirrus_fbdev_destroy(struct drm_device *dev, struct cirrus_fbdev *gfbdev) { - struct fb_info *info; struct cirrus_framebuffer *gfb = &gfbdev->gfb; - if (gfbdev->helper.fbdev) { - info = gfbdev->helper.fbdev; - - unregister_framebuffer(info); - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&gfbdev->helper); + drm_fb_helper_release_fbi(&gfbdev->helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index e4b976658..055fd86ba 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file, uint64_t *offset) { struct drm_gem_object *obj; - int ret; struct cirrus_bo *bo; - mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file, handle); - if (obj == NULL) { - ret = -ENOENT; - goto out_unlock; - } + if (obj == NULL) + return -ENOENT; bo = gem_to_cirrus_bo(obj); *offset = cirrus_bo_mmap_offset(bo); - drm_gem_object_unreference(obj); - ret = 0; -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + drm_gem_object_unreference_unlocked(obj); + return 0; } bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f6f2fb58e..f7d5166f8 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) if (!connector) continue; - WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - - connector->funcs->atomic_destroy_state(connector, + /* + * FIXME: Async commits can race with connector unplugging and + * there's currently nothing that prevents cleaning up state for + * deleted connectors. As long as the callback doesn't look at + * the connector we'll be fine though, so make sure that's the + * case by setting all connector pointers to NULL. + */ + state->connector_states[i]->connector = NULL; + connector->funcs->atomic_destroy_state(NULL, state->connector_states[i]); state->connectors[i] = NULL; state->connector_states[i] = NULL; @@ -1063,7 +1069,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, * Changed connectors are already in @state, so only need to look at the * current configuration. 
*/ - list_for_each_entry(connector, &config->connector_list, head) { + drm_for_each_connector(connector, state->dev) { if (connector->state->crtc != crtc) continue; @@ -1463,24 +1469,18 @@ retry: if (get_user(obj_id, objs_ptr + copied_objs)) { ret = -EFAULT; - goto fail; + goto out; } obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY); if (!obj || !obj->properties) { ret = -ENOENT; - goto fail; - } - - if (obj->type == DRM_MODE_OBJECT_PLANE) { - plane = obj_to_plane(obj); - plane_mask |= (1 << drm_plane_index(plane)); - plane->old_fb = plane->fb; + goto out; } if (get_user(count_props, count_props_ptr + copied_objs)) { ret = -EFAULT; - goto fail; + goto out; } copied_objs++; @@ -1492,28 +1492,35 @@ retry: if (get_user(prop_id, props_ptr + copied_props)) { ret = -EFAULT; - goto fail; + goto out; } prop = drm_property_find(dev, prop_id); if (!prop) { ret = -ENOENT; - goto fail; + goto out; } if (copy_from_user(&prop_value, prop_values_ptr + copied_props, sizeof(prop_value))) { ret = -EFAULT; - goto fail; + goto out; } ret = atomic_set_prop(state, obj, prop, prop_value); if (ret) - goto fail; + goto out; copied_props++; } + + if (obj->type == DRM_MODE_OBJECT_PLANE && count_props && + !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) { + plane = obj_to_plane(obj); + plane_mask |= (1 << drm_plane_index(plane)); + plane->old_fb = plane->fb; + } } if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { @@ -1523,7 +1530,7 @@ retry: e = create_vblank_event(dev, file_priv, arg->user_data); if (!e) { ret = -ENOMEM; - goto fail; + goto out; } crtc_state->event = e; @@ -1531,15 +1538,18 @@ retry: } if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { + /* + * Unlike commit, check_only does not clean up state. + * Below we call drm_atomic_state_free for it. + */ ret = drm_atomic_check_only(state); - /* _check_only() does not free state, unlike _commit() */ - drm_atomic_state_free(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { ret = drm_atomic_async_commit(state); } else { ret = drm_atomic_commit(state); } +out: /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping * locks (ie. while it is still safe to deref plane->state). We * need to do this here because the driver entry points cannot @@ -1552,41 +1562,40 @@ retry: drm_framebuffer_reference(new_fb); plane->fb = new_fb; plane->crtc = plane->state->crtc; - } else { - plane->old_fb = NULL; - } - if (plane->old_fb) { - drm_framebuffer_unreference(plane->old_fb); - plane->old_fb = NULL; + + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); } + plane->old_fb = NULL; } - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - return ret; - -fail: - if (ret == -EDEADLK) - goto backoff; + if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { + /* + * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive, + * if they weren't, this code should be called on success + * for TEST_ONLY too. 
+ */ - if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { for_each_crtc_in_state(state, crtc, crtc_state, i) { - destroy_vblank_event(dev, file_priv, crtc_state->event); - crtc_state->event = NULL; + if (!crtc_state->event) + continue; + + destroy_vblank_event(dev, file_priv, + crtc_state->event); } } - drm_atomic_state_free(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) + drm_atomic_state_free(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); return ret; - -backoff: - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - - goto retry; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9dcc7280e..aecb5d69b 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -89,7 +89,7 @@ get_current_crtc_for_encoder(struct drm_device *dev, WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - list_for_each_entry(connector, &config->connector_list, head) { + drm_for_each_connector(connector, dev) { if (connector->state->best_encoder != encoder) continue; @@ -124,7 +124,7 @@ steal_encoder(struct drm_atomic_state *state, if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - crtc_state->mode_changed = true; + crtc_state->connectors_changed = true; list_for_each_entry(connector, &config->connector_list, head) { if (connector->state->best_encoder != encoder) @@ -174,14 +174,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) idx = drm_crtc_index(connector->state->crtc); crtc_state = state->crtc_states[idx]; - crtc_state->mode_changed = true; + crtc_state->connectors_changed = true; } if (connector_state->crtc) { idx = drm_crtc_index(connector_state->crtc); crtc_state = state->crtc_states[idx]; - crtc_state->mode_changed = true; + crtc_state->connectors_changed = true; } } @@ -241,7 +241,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) idx = drm_crtc_index(connector_state->crtc); crtc_state = state->crtc_states[idx]; - crtc_state->mode_changed = true; + crtc_state->connectors_changed = true; DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", connector->base.id, @@ -264,7 +264,8 @@ mode_fixup(struct drm_atomic_state *state) bool ret; for_each_crtc_in_state(state, crtc, crtc_state, i) { - if (!crtc_state->mode_changed) + if (!crtc_state->mode_changed && + !crtc_state->connectors_changed) continue; drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode); @@ -306,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state) encoder->base.id, encoder->name); return ret; } - } else { + } else if (funcs->mode_fixup) { ret = funcs->mode_fixup(encoder, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { @@ -320,7 +321,8 @@ mode_fixup(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; - if (!crtc_state->mode_changed) + if (!crtc_state->mode_changed && + !crtc_state->connectors_changed) continue; funcs = crtc->helper_private; @@ -346,9 +348,14 @@ mode_fixup(struct drm_atomic_state *state) * * Check the state object to see if the requested state is physically possible. * This does all the crtc and connector related computations for an atomic - * update. It computes and updates crtc_state->mode_changed, adds any additional - * connectors needed for full modesets and calls down into ->mode_fixup - * functions of the driver backend. 
+ * update and adds any additional connectors needed for full modesets and calls + * down into ->mode_fixup functions of the driver backend. + * + * crtc_state->mode_changed is set when the input mode is changed. + * crtc_state->connectors_changed is set when a connector is added or + * removed from the crtc. + * crtc_state->active_changed is set when crtc_state->active changes, + * which is used for dpms. * * IMPORTANT: * @@ -381,7 +388,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (crtc->state->enable != crtc_state->enable) { DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", crtc->base.id); + + /* + * For clarity this assignment is done here, but + * enable == 0 is only true when there are no + * connectors and a NULL mode. + * + * The other way around is true as well. enable != 0 + * iff connectors are attached and a mode is set. + */ crtc_state->mode_changed = true; + crtc_state->connectors_changed = true; } } @@ -456,6 +473,9 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * This does all the plane update related checks using by calling into the * ->atomic_check hooks provided by the driver. * + * It also sets crtc_state->planes_changed to indicate that a crtc has + * updated planes. + * * RETURNS * Zero for success or -errno */ @@ -648,15 +668,29 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_crtc_state *old_crtc_state; int i; - /* clear out existing links */ + /* clear out existing links and update dpms */ for_each_connector_in_state(old_state, connector, old_conn_state, i) { - if (!connector->encoder) - continue; + if (connector->encoder) { + WARN_ON(!connector->encoder->crtc); - WARN_ON(!connector->encoder->crtc); + connector->encoder->crtc = NULL; + connector->encoder = NULL; + } - connector->encoder->crtc = NULL; - connector->encoder = NULL; + crtc = connector->state->crtc; + if ((!crtc && old_conn_state->crtc) || + (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) { + struct drm_property *dpms_prop = + dev->mode_config.dpms_property; + int mode = DRM_MODE_DPMS_OFF; + + if (crtc && crtc->state->active) + mode = DRM_MODE_DPMS_ON; + + connector->dpms = mode; + drm_object_property_set_value(&connector->base, + dpms_prop, mode); + } } /* set new links */ @@ -673,10 +707,16 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, /* set legacy state in the crtc structure */ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + struct drm_plane *primary = crtc->primary; + crtc->mode = crtc->state->mode; crtc->enabled = crtc->state->enable; - crtc->x = crtc->primary->state->src_x >> 16; - crtc->y = crtc->primary->state->src_y >> 16; + + if (drm_atomic_get_existing_plane_state(old_state, primary) && + primary->state->crtc == crtc) { + crtc->x = primary->state->src_x >> 16; + crtc->y = primary->state->src_y >> 16; + } if (crtc->state->enable) drm_calc_timestamping_constants(crtc, @@ -750,7 +790,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) * This function shuts down all the outputs that need to be shut down and * prepares them (if required) with the new mode. * - * For compatability with legacy crtc helpers this should be called before + * For compatibility with legacy crtc helpers this should be called before * drm_atomic_helper_commit_planes(), which is what the default commit function * does. But drivers with different needs can group the modeset commits together * and do the plane commits at the end. 
This is useful for drivers doing runtime @@ -775,7 +815,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); * This function enables all the outputs with the new configuration which had to * be turned off for the update. * - * For compatability with legacy crtc helpers this should be called after + * For compatibility with legacy crtc helpers this should be called after * drm_atomic_helper_commit_planes(), which is what the default commit function * does. But drivers with different needs can group the modeset commits together * and do the plane commits at the end. This is useful for drivers doing runtime @@ -926,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, continue; old_crtc_state->enable = true; - old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); + old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc); } for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { @@ -935,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, ret = wait_event_timeout(dev->vblank[i].queue, old_crtc_state->last_vblank_count != - drm_vblank_count(dev, i), + drm_crtc_vblank_count(crtc), msecs_to_jiffies(50)); drm_crtc_vblank_put(crtc); @@ -1146,7 +1186,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_begin) continue; - funcs->atomic_begin(crtc); + funcs->atomic_begin(crtc, old_crtc_state); } for_each_plane_in_state(old_state, plane, old_plane_state, i) { @@ -1176,7 +1216,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_flush) continue; - funcs->atomic_flush(crtc); + funcs->atomic_flush(crtc, old_crtc_state); } } EXPORT_SYMBOL(drm_atomic_helper_commit_planes); @@ -1212,7 +1252,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) crtc_funcs = crtc->helper_private; if (crtc_funcs && crtc_funcs->atomic_begin) - crtc_funcs->atomic_begin(crtc); + crtc_funcs->atomic_begin(crtc, old_crtc_state); drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { struct drm_plane_state *old_plane_state = @@ -1235,7 +1275,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) } if (crtc_funcs && crtc_funcs->atomic_flush) - crtc_funcs->atomic_flush(crtc); + crtc_funcs->atomic_flush(crtc, old_crtc_state); } EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); @@ -1923,10 +1963,6 @@ retry: if (ret != 0) goto fail; - /* TODO: ->page_flip is the only driver callback where the core - * doesn't update plane->fb. For now patch it up here. */ - plane->fb = plane->state->fb; - /* Driver takes ownership of state on successful async commit. */ return 0; fail: @@ -1960,9 +1996,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip); * implementing the legacy DPMS connector interface. It computes the new desired * ->active state for the corresponding CRTC (if the connector is enabled) and * updates it. + * + * Returns: + * Returns 0 on success, negative errno numbers on failure. 
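[Editor's note: the drm_atomic_helper_connector_dpms() hunk below converts a void helper into one that propagates errors while keeping the standard atomic retry pattern: on -EDEADLK the state is cleared, the acquire context backs off, and the whole sequence is retried; any other error reaches the caller. A minimal standalone sketch of that control flow follows — the demo_* names and the single forced -EDEADLK are illustrative assumptions, not part of the DRM API.]

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Fail with -EDEADLK once to exercise the backoff path. */
static int demo_try_commit(void)
{
	return attempts++ ? 0 : -EDEADLK;
}

static void demo_state_clear(void)   { puts("clear state"); }
static void demo_backoff_locks(void) { puts("drop and re-take locks"); }

/*
 * Shape of the retry loop used by the helper: -EDEADLK is never
 * returned to the caller, every other error is.
 */
static int demo_set_dpms(void)
{
	int ret;

retry:
	ret = demo_try_commit();
	if (ret == -EDEADLK) {
		demo_state_clear();
		demo_backoff_locks();
		goto retry;
	}
	return ret; /* 0 on success, negative errno on real failure */
}

int main(void)
{
	printf("ret=%d\n", demo_set_dpms());
	return 0;
}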
*/ -void drm_atomic_helper_connector_dpms(struct drm_connector *connector, - int mode) +int drm_atomic_helper_connector_dpms(struct drm_connector *connector, + int mode) { struct drm_mode_config *config = &connector->dev->mode_config; struct drm_atomic_state *state; @@ -1971,6 +2010,7 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector, struct drm_connector *tmp_connector; int ret; bool active = false; + int old_mode = connector->dpms; if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; @@ -1979,22 +2019,23 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector, crtc = connector->state->crtc; if (!crtc) - return; + return 0; - /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */ state = drm_atomic_state_alloc(connector->dev); if (!state) - return; + return -ENOMEM; state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); retry: crtc_state = drm_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) - return; + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto fail; + } WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - list_for_each_entry(tmp_connector, &config->connector_list, head) { + drm_for_each_connector(tmp_connector, connector->dev) { if (tmp_connector->state->crtc != crtc) continue; @@ -2009,17 +2050,16 @@ retry: if (ret != 0) goto fail; - /* Driver takes ownership of state on successful async commit. */ - return; + /* Driver takes ownership of state on successful commit. */ + return 0; fail: if (ret == -EDEADLK) goto backoff; + connector->dpms = old_mode; drm_atomic_state_free(state); - WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret); - - return; + return ret; backoff: drm_atomic_state_clear(state); drm_atomic_legacy_backoff(state); @@ -2080,6 +2120,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, state->mode_changed = false; state->active_changed = false; state->planes_changed = false; + state->connectors_changed = false; state->event = NULL; } EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state); diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 9b23525c0..192a5f9ee 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -53,6 +53,10 @@ struct drm_ctx_list { */ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return; + mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); @@ -85,10 +89,13 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev) * * Initialise the drm_device::ctx_idr */ -int drm_legacy_ctxbitmap_init(struct drm_device * dev) +void drm_legacy_ctxbitmap_init(struct drm_device * dev) { + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return; + idr_init(&dev->ctx_idr); - return 0; } /** @@ -101,6 +108,10 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev) */ void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) { + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return; + mutex_lock(&dev->struct_mutex); idr_destroy(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); @@ -119,6 +130,10 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) { struct drm_ctx_list *pos, *tmp; + if (!drm_core_check_feature(dev, 
DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return; + mutex_lock(&dev->ctxlist_mutex); list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { @@ -161,6 +176,10 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data, struct drm_local_map *map; struct drm_map_list *_entry; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->struct_mutex); map = idr_find(&dev->ctx_idr, request->ctx_id); @@ -205,6 +224,10 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data, struct drm_local_map *map = NULL; struct drm_map_list *r_list = NULL; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map @@ -305,6 +328,10 @@ int drm_legacy_resctx(struct drm_device *dev, void *data, struct drm_ctx ctx; int i; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { @@ -335,6 +362,10 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, struct drm_ctx_list *ctx_entry; struct drm_ctx *ctx = data; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + ctx->handle = drm_legacy_ctxbitmap_next(dev); if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ @@ -378,6 +409,10 @@ int drm_legacy_getctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + /* This is 0, because we don't handle any context flags */ ctx->flags = 0; @@ -400,6 +435,10 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); return drm_context_switch(dev, dev->last_context, ctx->handle); } @@ -420,6 +459,10 @@ int drm_legacy_newctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); drm_context_switch_complete(dev, file_priv, ctx->handle); @@ -442,6 +485,10 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data, { struct drm_ctx *ctx = data; + if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && + drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + DRM_DEBUG("%d\n", ctx->handle); if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fed748311..8328e7059 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -615,7 +615,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) if (atomic_read(&fb->refcount.refcount) > 1) { drm_modeset_lock_all(dev); /* remove from any CRTC */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(crtc, dev) { if (crtc->primary->fb == fb) { /* should turn off the crtc */ memset(&set, 0, sizeof(struct 
drm_mode_set)); @@ -627,7 +627,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) } } - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + drm_for_each_plane(plane, dev) { if (plane->fb == fb) drm_plane_force_disable(plane); } @@ -736,7 +736,7 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc) unsigned int index = 0; struct drm_crtc *tmp; - list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { + drm_for_each_crtc(tmp, crtc->dev) { if (tmp == crtc) return index; @@ -988,7 +988,7 @@ unsigned int drm_connector_index(struct drm_connector *connector) WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) { + drm_for_each_connector(tmp, connector->dev) { if (tmp == connector) return index; @@ -1054,7 +1054,7 @@ void drm_connector_unplug_all(struct drm_device *dev) { struct drm_connector *connector; - /* taking the mode config mutex ends up in a clash with sysfs */ + /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) drm_connector_unregister(connector); @@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup); int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, - const uint32_t *formats, uint32_t format_count, + const uint32_t *formats, unsigned int format_count, enum drm_plane_type type) { struct drm_mode_config *config = &dev->mode_config; @@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init); int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, - const uint32_t *formats, uint32_t format_count, + const uint32_t *formats, unsigned int format_count, bool is_primary) { enum drm_plane_type type; @@ -1280,7 +1280,7 @@ unsigned int drm_plane_index(struct drm_plane *plane) unsigned int index = 0; struct drm_plane *tmp; - list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) { + drm_for_each_plane(tmp, plane->dev) { if (tmp == plane) return index; @@ -1305,7 +1305,7 @@ drm_plane_from_index(struct drm_device *dev, int idx) struct drm_plane *plane; unsigned int i = 0; - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + drm_for_each_plane(plane, dev) { if (i == idx) return plane; i++; @@ -1679,70 +1679,6 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev) } EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties); -static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) -{ - uint32_t total_objects = 0; - - total_objects += dev->mode_config.num_crtc; - total_objects += dev->mode_config.num_connector; - total_objects += dev->mode_config.num_encoder; - - group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL); - if (!group->id_list) - return -ENOMEM; - - group->num_crtcs = 0; - group->num_connectors = 0; - group->num_encoders = 0; - return 0; -} - -void drm_mode_group_destroy(struct drm_mode_group *group) -{ - kfree(group->id_list); - group->id_list = NULL; -} - -/* - * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is - * the drm core's responsibility to set up mode control groups. 
- */ -int drm_mode_group_init_legacy_group(struct drm_device *dev, - struct drm_mode_group *group) -{ - struct drm_crtc *crtc; - struct drm_encoder *encoder; - struct drm_connector *connector; - int ret; - - ret = drm_mode_group_init(dev, group); - if (ret) - return ret; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - group->id_list[group->num_crtcs++] = crtc->base.id; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - group->id_list[group->num_crtcs + group->num_encoders++] = - encoder->base.id; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - group->id_list[group->num_crtcs + group->num_encoders + - group->num_connectors++] = connector->base.id; - - return 0; -} -EXPORT_SYMBOL(drm_mode_group_init_legacy_group); - -void drm_reinit_primary_mode_group(struct drm_device *dev) -{ - drm_modeset_lock_all(dev); - drm_mode_group_destroy(&dev->primary->mode_group); - drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); - drm_modeset_unlock_all(dev); -} -EXPORT_SYMBOL(drm_reinit_primary_mode_group); - /** * drm_mode_getresources - get graphics configuration * @dev: drm device for the ioctl @@ -1771,12 +1707,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data, int crtc_count = 0; int fb_count = 0; int encoder_count = 0; - int copied = 0, i; + int copied = 0; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; - struct drm_mode_group *mode_group; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; @@ -1809,24 +1744,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data, /* mode_config.mutex protects the connector list against e.g. DP MST * connector hot-adding. CRTC/Plane lists are invariant. 
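[Editor's note: much of this patch mechanically replaces open-coded list_for_each_entry() walks over the mode_config lists with the drm_for_each_crtc/encoder/connector() iterators. The standalone sketch below shows the general shape of such a wrapper over an intrusive list; the demo_* types, the hand-rolled list_for_each_entry(), and the two-connector harness are illustrative assumptions — the in-tree macros additionally assert that the required locks are held.]

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct demo_connector {
	int id;
	struct list_head head;
};

struct demo_mode_config { struct list_head connector_list; };
struct demo_device { struct demo_mode_config mode_config; };

/* The wrapper hides which list inside the device is being walked. */
#define demo_for_each_connector(connector, dev) \
	list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)

int main(void)
{
	struct demo_device dev;
	struct demo_connector a = { .id = 1 }, b = { .id = 2 };
	struct demo_connector *conn;

	/* Hand-link a two-entry circular list. */
	dev.mode_config.connector_list.next = &a.head;
	dev.mode_config.connector_list.prev = &b.head;
	a.head.next = &b.head;
	a.head.prev = &dev.mode_config.connector_list;
	b.head.next = &dev.mode_config.connector_list;
	b.head.prev = &a.head;

	demo_for_each_connector(conn, &dev)
		printf("connector %d\n", conn->id);
	return 0;
}

[Centralizing the list head and the locking assumptions in one macro is what lets later patches change the locking scheme without touching every caller.]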
*/ mutex_lock(&dev->mode_config.mutex); - if (!drm_is_primary_client(file_priv)) { + drm_for_each_crtc(crtc, dev) + crtc_count++; - mode_group = NULL; - list_for_each(lh, &dev->mode_config.crtc_list) - crtc_count++; + drm_for_each_connector(connector, dev) + connector_count++; - list_for_each(lh, &dev->mode_config.connector_list) - connector_count++; - - list_for_each(lh, &dev->mode_config.encoder_list) - encoder_count++; - } else { - - mode_group = &file_priv->master->minor->mode_group; - crtc_count = mode_group->num_crtcs; - connector_count = mode_group->num_connectors; - encoder_count = mode_group->num_encoders; - } + drm_for_each_encoder(encoder, dev) + encoder_count++; card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; @@ -1837,25 +1762,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data, if (card_res->count_crtcs >= crtc_count) { copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; - if (!mode_group) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, - head) { - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); - if (put_user(crtc->base.id, crtc_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } - } else { - for (i = 0; i < mode_group->num_crtcs; i++) { - if (put_user(mode_group->id_list[i], - crtc_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + drm_for_each_crtc(crtc, dev) { + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + if (put_user(crtc->base.id, crtc_id + copied)) { + ret = -EFAULT; + goto out; } + copied++; } } card_res->count_crtcs = crtc_count; @@ -1864,29 +1777,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data, if (card_res->count_encoders >= encoder_count) { copied = 0; encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; - if (!mode_group) { - list_for_each_entry(encoder, - &dev->mode_config.encoder_list, - head) { - DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, - encoder->name); - if (put_user(encoder->base.id, encoder_id + - copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } - } else { - for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { - if (put_user(mode_group->id_list[i], - encoder_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + drm_for_each_encoder(encoder, dev) { + DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, + encoder->name); + if (put_user(encoder->base.id, encoder_id + + copied)) { + ret = -EFAULT; + goto out; } - + copied++; } } card_res->count_encoders = encoder_count; @@ -1895,31 +1794,16 @@ int drm_mode_getresources(struct drm_device *dev, void *data, if (card_res->count_connectors >= connector_count) { copied = 0; connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; - if (!mode_group) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - head) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); - if (put_user(connector->base.id, - connector_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } - } else { - int start = mode_group->num_crtcs + - mode_group->num_encoders; - for (i = start; i < start + mode_group->num_connectors; i++) { - if (put_user(mode_group->id_list[i], - connector_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + drm_for_each_connector(connector, dev) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); + if (put_user(connector->base.id, + connector_id + copied)) { + ret = 
-EFAULT; + goto out; } + copied++; } } card_res->count_connectors = connector_count; @@ -2187,7 +2071,7 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) /* For atomic drivers only state objects are synchronously updated and * protected by modeset locks, so check those first. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { if (!connector->state) continue; @@ -2291,7 +2175,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr; /* Plane lists are invariant, no locking needed. */ - list_for_each_entry(plane, &config->plane_list, head) { + drm_for_each_plane(plane, dev) { /* * Unless userspace set the 'universal planes' * capability bit, only advertise overlays. @@ -2596,7 +2480,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set) * connectors from it), hence we need to refcount the fbs across all * crtcs. Atomic modeset will have saner semantics ... */ - list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) + drm_for_each_crtc(tmp, crtc->dev) tmp->primary->old_fb = tmp->primary->fb; fb = set->fb; @@ -2607,7 +2491,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set) crtc->primary->fb = fb; } - list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { + drm_for_each_crtc(tmp, crtc->dev) { if (tmp->primary->fb) drm_framebuffer_reference(tmp->primary->fb); if (tmp->primary->old_fb) @@ -4221,7 +4105,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length) + if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); @@ -4301,7 +4185,6 @@ void drm_property_unreference_blob(struct drm_property_blob *blob) mutex_unlock(&dev->mode_config.blob_lock); else might_lock(&dev->mode_config.blob_lock); - } EXPORT_SYMBOL(drm_property_unreference_blob); @@ -4472,9 +4355,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev, goto err_created; } - if (old_blob) - drm_property_unreference_blob(old_blob); - + drm_property_unreference_blob(old_blob); *replace = new_blob; return 0; @@ -4573,7 +4454,7 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, * not associated with any file_priv. */ mutex_lock(&dev->mode_config.blob_lock); out_resp->blob_id = blob->base.id; - list_add_tail(&file_priv->blobs, &blob->head_file); + list_add_tail(&blob->head_file, &file_priv->blobs); mutex_unlock(&dev->mode_config.blob_lock); return 0; @@ -4872,9 +4753,9 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, /* Do DPMS ourselves */ if (property == connector->dev->mode_config.dpms_property) { - if (connector->funcs->dpms) - (*connector->funcs->dpms)(connector, (int)value); ret = 0; + if (connector->funcs->dpms) + ret = (*connector->funcs->dpms)(connector, (int)value); } else if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, value); @@ -5349,13 +5230,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, /* Keep the old fb, don't unref it. */ crtc->primary->old_fb = NULL; } else { - /* - * Warn if the driver hasn't properly updated the crtc->fb - * field to reflect that the new framebuffer is now used. - * Failing to do so will screw with the reference counting - * on framebuffers. 
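[Editor's note: the page-flip hunk below makes the core update crtc->primary->fb itself and then unreference only the old framebuffer, instead of warning when a driver forgot to do so. The toy sketch here illustrates that reference hand-over in isolation; struct fb, fb_ref/fb_unref and install_fb are invented stand-ins for the drm_framebuffer refcounting, not real DRM calls.]

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct drm_framebuffer. */
struct fb { int refs; };

static struct fb *fb_ref(struct fb *f) { if (f) f->refs++; return f; }

static void fb_unref(struct fb *f)
{
	if (f && --f->refs == 0)
		free(f);
}

/*
 * Hand-over pattern: the pointer slot takes ownership of the caller's
 * reference to 'new', and the reference previously held through the
 * slot is dropped exactly once.
 */
static void install_fb(struct fb **slot, struct fb *new)
{
	struct fb *old = *slot;

	*slot = new;   /* slot now owns the caller's reference */
	fb_unref(old); /* unref only the old framebuffer */
}

int main(void)
{
	struct fb *a = fb_ref(calloc(1, sizeof(*a)));
	struct fb *b = fb_ref(calloc(1, sizeof(*b)));
	struct fb *slot = a;

	install_fb(&slot, b); /* frees 'a'; slot now points at 'b' */
	fb_unref(slot);       /* frees 'b' */
	return 0;
}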
- */ - WARN_ON(crtc->primary->fb != fb); + crtc->primary->fb = fb; /* Unref only the old framebuffer. */ fb = NULL; } @@ -5386,21 +5261,23 @@ void drm_mode_config_reset(struct drm_device *dev) struct drm_encoder *encoder; struct drm_connector *connector; - list_for_each_entry(plane, &dev->mode_config.plane_list, head) + drm_for_each_plane(plane, dev) if (plane->funcs->reset) plane->funcs->reset(plane); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + drm_for_each_crtc(crtc, dev) if (crtc->funcs->reset) crtc->funcs->reset(crtc); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + drm_for_each_encoder(encoder, dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + mutex_lock(&dev->mode_config.mutex); + drm_for_each_connector(connector, dev) if (connector->funcs->reset) connector->funcs->reset(connector); + mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 393114df8..ef534758a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -121,7 +121,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); } - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) if (connector->encoder == encoder) return true; return false; @@ -151,7 +151,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) if (!oops_in_progress) WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + drm_for_each_encoder(encoder, dev) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; return false; @@ -180,7 +180,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) drm_warn_on_modeset_not_all_locked(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (!drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnect encoder from any connector */ @@ -188,7 +188,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) } } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(crtc, dev) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { @@ -230,7 +230,7 @@ drm_crtc_prepare_encoders(struct drm_device *dev) const struct drm_encoder_helper_funcs *encoder_funcs; struct drm_encoder *encoder; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { encoder_funcs = encoder->helper_private; /* Disable unused encoders */ if (encoder->crtc == NULL) @@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; @@ -334,7 +334,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, crtc->hwmode = *adjusted_mode; /* Prepare the encoders and CRTCs before setting the mode. 
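[Editor's note: the drm_crtc_helper_set_mode() hunks that follow all preserve the legacy helper's fixed ordering: prepare (switch off) the encoders and the crtc, program the new mode, then commit (switch back on). A schematic sketch of that sequence, with plain stand-in hooks in place of the real helper vtables; nothing here is DRM API.]

#include <stdio.h>

static void encoder_prepare(void)  { puts("encoder: prepare (dpms off)"); }
static void crtc_prepare(void)     { puts("crtc: prepare (dpms off)"); }
static int  crtc_mode_set(void)    { puts("crtc: mode_set"); return 0; }
static void encoder_mode_set(void) { puts("encoder: mode_set"); }
static void crtc_commit(void)      { puts("crtc: commit (dpms on)"); }
static void encoder_commit(void)   { puts("encoder: commit (dpms on)"); }

/* Fixed ordering used by the legacy modeset helper. */
static int demo_set_mode(void)
{
	encoder_prepare();
	crtc_prepare();

	if (crtc_mode_set())
		return -1;
	encoder_mode_set();

	crtc_commit();
	encoder_commit();
	return 0;
}

int main(void) { return demo_set_mode(); }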
*/ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; @@ -359,7 +359,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!ret) goto done; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; @@ -376,7 +376,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, /* Now enable the clocks, plane, pipe, and connectors that we set up. */ crtc_funcs->commit(crtc); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; @@ -418,11 +418,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) struct drm_encoder *encoder; /* Decouple all encoders and their attached connectors from this crtc */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { if (connector->encoder != encoder) continue; @@ -519,12 +519,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) * restored, not the drivers personal bookkeeping. */ count = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { save_encoders[count++] = *encoder; } count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { save_connectors[count++] = *connector; } @@ -562,7 +562,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) /* a) traverse passed in connector list and get encoders for them */ count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; @@ -602,7 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { if (!connector->encoder) continue; @@ -685,12 +685,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) fail: /* Restore all previous data. 
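[Editor's note: as the surrounding hunks show, drm_crtc_helper_set_config() rolls back by snapshotting the encoder and connector structs by value up front and copying the snapshots back on the fail: path. A minimal sketch of that transactional pattern; demo_set_config() and struct demo_encoder are illustrative, and the fixed save[8] array stands in for the allocated save_encoders/save_connectors buffers.]

#include <stdio.h>

struct demo_encoder { int crtc_id; };

/* Try an update; on failure, restore the by-value snapshots. */
static int demo_set_config(struct demo_encoder *enc, int n, int fail)
{
	struct demo_encoder save[8];
	int i;

	for (i = 0; i < n; i++)
		save[i] = enc[i]; /* snapshot by value */

	for (i = 0; i < n; i++)
		enc[i].crtc_id = 99; /* speculative new routing */

	if (fail) {
		for (i = 0; i < n; i++)
			enc[i] = save[i]; /* restore all previous data */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_encoder e[2] = { { 1 }, { 2 } };

	demo_set_config(e, 2, 1);
	printf("%d %d\n", e[0].crtc_id, e[1].crtc_id); /* 1 2: rolled back */
	return 0;
}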
*/ count = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { *encoder = save_encoders[count++]; } count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { *connector = save_connectors[count++]; } @@ -712,7 +712,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) struct drm_connector *connector; struct drm_device *dev = encoder->dev; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) if (connector->encoder == encoder) if (connector->dpms < dpms) dpms = connector->dpms; @@ -746,7 +746,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) struct drm_connector *connector; struct drm_device *dev = crtc->dev; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) if (connector->encoder && connector->encoder->crtc == crtc) if (connector->dpms < dpms) dpms = connector->dpms; @@ -762,15 +762,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) * implementing the DPMS connector attribute. It computes the new desired DPMS * state for all encoders and crtcs in the output mesh and calls the ->dpms() * callback provided by the driver appropriately. + * + * Returns: + * Always returns 0. */ -void drm_helper_connector_dpms(struct drm_connector *connector, int mode) +int drm_helper_connector_dpms(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = connector->encoder; struct drm_crtc *crtc = encoder ? encoder->crtc : NULL; int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; if (mode == connector->dpms) - return; + return 0; old_dpms = connector->dpms; connector->dpms = mode; @@ -802,7 +805,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) } } - return; + return 0; } EXPORT_SYMBOL(drm_helper_connector_dpms); @@ -862,7 +865,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev) bool ret; drm_modeset_lock_all(dev); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(crtc, dev) { if (!crtc->enabled) continue; @@ -876,7 +879,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev) /* Turn off outputs that were already powered off */ if (drm_helper_choose_crtc_dpms(crtc)) { - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + drm_for_each_encoder(encoder, dev) { if(encoder->crtc != crtc) continue; @@ -928,15 +931,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod if (crtc->funcs->atomic_duplicate_state) crtc_state = crtc->funcs->atomic_duplicate_state(crtc); else { - crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); - if (!crtc_state) - return -ENOMEM; - if (crtc->state) - __drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state); - else - crtc_state->crtc = crtc; + if (!crtc->state) + drm_atomic_helper_crtc_reset(crtc); + + crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc); } + if (!crtc_state) + return -ENOMEM; + crtc_state->planes_changed = true; crtc_state->mode_changed = true; ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); @@ -957,11 +960,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb); out: - if (crtc->funcs->atomic_destroy_state) - crtc->funcs->atomic_destroy_state(crtc, crtc_state); - else { - __drm_atomic_helper_crtc_destroy_state(crtc, 
crtc_state); - kfree(crtc_state); + if (crtc_state) { + if (crtc->funcs->atomic_destroy_state) + crtc->funcs->atomic_destroy_state(crtc, crtc_state); + else + drm_atomic_helper_crtc_destroy_state(crtc, crtc_state); } return ret; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 80a02a412..291734e87 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -159,6 +159,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw) } EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); +#define AUX_RETRY_INTERVAL 500 /* us */ + /** * DOC: dp helpers * @@ -213,7 +215,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, return -EIO; case DP_AUX_NATIVE_REPLY_DEFER: - usleep_range(400, 500); + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); break; } } @@ -422,6 +424,90 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter) I2C_FUNC_10BIT_ADDR; } +#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */ +#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */ +#define AUX_STOP_LEN 4 +#define AUX_CMD_LEN 4 +#define AUX_ADDRESS_LEN 20 +#define AUX_REPLY_PAD_LEN 4 +#define AUX_LENGTH_LEN 8 + +/* + * Calculate the duration of the AUX request/reply in usec. Gives the + * "best" case estimate, ie. successful while as short as possible. + */ +static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg) +{ + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + + AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN; + + if ((msg->request & DP_AUX_I2C_READ) == 0) + len += msg->size * 8; + + return len; +} + +static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg) +{ + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + + AUX_CMD_LEN + AUX_REPLY_PAD_LEN; + + /* + * For read we expect what was asked. For writes there will + * be 0 or 1 data bytes. Assume 0 for the "best" case. + */ + if (msg->request & DP_AUX_I2C_READ) + len += msg->size * 8; + + return len; +} + +#define I2C_START_LEN 1 +#define I2C_STOP_LEN 1 +#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */ +#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */ + +/* + * Calculate the length of the i2c transfer in usec, assuming + * the i2c bus speed is as specified. Gives the "worst" + * case estimate, ie. successful while as long as possible. + * Doesn't account for the "MOT" bit, and instead assumes each + * message includes a START, ADDRESS and STOP. Neither does it + * account for additional random variables such as clock stretching. + */ +static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg, + int i2c_speed_khz) +{ + /* AUX bitrate is 1MHz, i2c bitrate as specified */ + return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN + + msg->size * I2C_DATA_LEN + + I2C_STOP_LEN) * 1000, i2c_speed_khz); +} + +/* + * Determine how many retries should be attempted to successfully transfer + * the specified message, based on the estimated durations of the + * i2c and AUX transfers. + */ +static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg, + int i2c_speed_khz) +{ + int aux_time_us = drm_dp_aux_req_duration(msg) + + drm_dp_aux_reply_duration(msg); + int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz); + + return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL); +} + +/* + * FIXME currently assumes 10 kHz as some real world devices seem + * to require it. We should query/set the speed via DPCD if supported. 
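[Editor's note: the retry heuristic added here is pure integer arithmetic and can be sanity-checked outside the kernel. The standalone program below mirrors the duration formulas from the hunk above — the constants and DIV_ROUND_UP are copied from the patch, while the main() harness and the sample 16-byte read at 10 kHz are illustrative assumptions.]

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Constants copied from the hunk above. */
#define AUX_PRECHARGE_LEN 10
#define AUX_SYNC_LEN (16 + 4)
#define AUX_STOP_LEN 4
#define AUX_CMD_LEN 4
#define AUX_ADDRESS_LEN 20
#define AUX_REPLY_PAD_LEN 4
#define AUX_LENGTH_LEN 8
#define AUX_RETRY_INTERVAL 500 /* us */

#define I2C_START_LEN 1
#define I2C_STOP_LEN 1
#define I2C_ADDR_LEN 9
#define I2C_DATA_LEN 9

/* AUX runs at 1 MHz, so bit counts approximate microseconds. */
static int aux_req_duration(int size, int is_read)
{
	int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
		  AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;

	if (!is_read)
		len += size * 8;
	return len;
}

static int aux_reply_duration(int size, int is_read)
{
	int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
		  AUX_CMD_LEN + AUX_REPLY_PAD_LEN;

	if (is_read)
		len += size * 8;
	return len;
}

/* Worst-case i2c transfer duration in usec at the given bus speed. */
static int i2c_msg_duration(int size, int i2c_speed_khz)
{
	return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
			     size * I2C_DATA_LEN + I2C_STOP_LEN) * 1000,
			    i2c_speed_khz);
}

int main(void)
{
	int size = 16, is_read = 1, speed = 10; /* illustrative sample */
	int aux_us = aux_req_duration(size, is_read) +
		     aux_reply_duration(size, is_read);
	int i2c_us = i2c_msg_duration(size, speed);
	int retries = DIV_ROUND_UP(i2c_us, aux_us + AUX_RETRY_INTERVAL);

	/* For this sample: aux=236us, i2c=15500us -> 22 retries. */
	printf("aux=%dus i2c=%dus -> %d retries\n", aux_us, i2c_us, retries);
	return 0;
}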
+ */ +static int dp_aux_i2c_speed_khz __read_mostly = 10; +module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644); +MODULE_PARM_DESC(dp_aux_i2c_speed_khz, + "Assumed speed of the i2c bus in kHz, (1-400, default 10)"); + /* * Transfer a single I2C-over-AUX message and handle various error conditions, * retrying the transaction as appropriate. It is assumed that the @@ -434,13 +520,16 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { unsigned int retry, defer_i2c; int ret; - /* * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device * is required to retry at least seven times upon receiving AUX_DEFER * before giving up the AUX transaction. + * + * We also try to account for the i2c bus speed. */ - for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) { + int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); + + for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { mutex_lock(&aux->hw_mutex); ret = aux->transfer(aux, msg); mutex_unlock(&aux->hw_mutex); @@ -476,7 +565,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) * For now just defer for long enough to hopefully be * safe for all use-cases. */ - usleep_range(500, 600); + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); continue; default: @@ -506,7 +595,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) aux->i2c_defer_count++; if (defer_i2c < 7) defer_i2c++; - usleep_range(400, 500); + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); continue; default: diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 27a2426c3..809959d56 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -1029,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1043,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1107,15 +1107,14 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); if 
(!port->connector) { /* remove it from the port list */ @@ -1126,9 +1125,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_put_port(port); goto out; } - if (port->port_num >= 8) { + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } out: @@ -1193,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - if (!port->mstb) { + mstb = port->mstb; + if (!mstb) { DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); - return NULL; + goto out; } - mstb = port->mstb; break; } } } kref_get(&mstb->kref); +out: mutex_unlock(&mgr->lock); return mstb; } @@ -1213,10 +1215,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1469,8 +1470,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1478,11 +1479,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1510,11 +1512,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -2276,10 +2279,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2645,6 +2648,16 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, "%02x ", buf[i]); seq_printf(m, "\n"); + /* dump the standard OUI branch header */ + ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); + seq_printf(m, "branch oui: "); + for (i = 0; i < 0x3; i++) + seq_printf(m, "%02x", buf[i]); + seq_printf(m, " devid: "); + for (i = 0x3; i < 0x8; i++) + seq_printf(m, "%c", buf[i]); + seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); + seq_printf(m, "\n"); bret = dump_dp_payload_table(mgr, buf); if (bret == true) { seq_printf(m, "payload table: "); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index b7bf4ce8c..53d09a19f 100644 --- a/drivers/gpu/drm/drm_drv.c +++ 
b/drivers/gpu/drm/drm_drv.c @@ -285,7 +285,6 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type) if (!minor) return; - drm_mode_group_destroy(&minor->mode_group); put_device(minor->kdev); spin_lock_irqsave(&drm_minor_lock, flags); @@ -582,11 +581,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, if (drm_ht_create(&dev->map_hash, 12)) goto err_minors; - ret = drm_legacy_ctxbitmap_init(dev); - if (ret) { - DRM_ERROR("Cannot allocate memory for context bitmap.\n"); - goto err_ht; - } + drm_legacy_ctxbitmap_init(dev); if (drm_core_check_feature(dev, DRIVER_GEM)) { ret = drm_gem_init(dev); @@ -600,7 +595,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, err_ctxbitmap: drm_legacy_ctxbitmap_cleanup(dev); -err_ht: drm_ht_remove(&dev->map_hash); err_minors: drm_minor_free(dev, DRM_MINOR_LEGACY); @@ -705,20 +699,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } - /* setup grouping for legacy outputs */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_mode_group_init_legacy_group(dev, - &dev->primary->mode_group); - if (ret) - goto err_unload; - } - ret = 0; goto out_unlock; -err_unload: - if (dev->driver->unload) - dev->driver->unload(dev); err_minors: drm_minor_unregister(dev, DRM_MINOR_LEGACY); drm_minor_unregister(dev, DRM_MINOR_RENDER); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7087da37d..05bb7311a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3413,7 +3413,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) if (connector->encoder == encoder && connector->eld[0]) return connector; @@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector, struct drm_display_mode *mode; struct drm_device *dev = connector->dev; - count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); + count = ARRAY_SIZE(drm_dmt_modes); if (hdisplay < 0) hdisplay = 0; if (vdisplay < 0) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 5c1aca443..c19a62561 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -209,23 +209,11 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_framebuffer *fb; - int ret; - - ret = mutex_lock_interruptible(&dev->mode_config.mutex); - if (ret) - return ret; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) { - mutex_unlock(&dev->mode_config.mutex); - return ret; - } - - list_for_each_entry(fb, &dev->mode_config.fb_list, head) + mutex_lock(&dev->mode_config.fb_lock); + drm_for_each_fb(fb, dev) drm_fb_cma_describe(fb, m); - - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&dev->mode_config.fb_lock); return 0; } @@ -234,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); static struct fb_ops drm_fbdev_cma_ops = { .owner = THIS_MODULE, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, .fb_check_var = 
drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_blank = drm_fb_helper_blank, @@ -275,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, if (IS_ERR(obj)) return -ENOMEM; - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); - ret = -ENOMEM; + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { + ret = PTR_ERR(fbi); goto err_drm_gem_cma_free_object; } @@ -286,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, if (IS_ERR(fbdev_cma->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = PTR_ERR(fbdev_cma->fb); - goto err_framebuffer_release; + goto err_fb_info_destroy; } fb = &fbdev_cma->fb->fb; helper->fb = fb; - helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &drm_fbdev_cma_ops; - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - dev_err(dev->dev, "Failed to allocate color map.\n"); - goto err_drm_fb_cma_destroy; - } - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); @@ -317,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, return 0; -err_drm_fb_cma_destroy: - drm_framebuffer_unregister_private(fb); - drm_fb_cma_destroy(fb); -err_framebuffer_release: - framebuffer_release(fbi); +err_fb_info_destroy: + drm_fb_helper_release_fbi(helper); err_drm_gem_cma_free_object: drm_gem_cma_free_object(&obj->base); return ret; @@ -397,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init); */ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma) { - if (fbdev_cma->fb_helper.fbdev) { - struct fb_info *info; - int ret; - - info = fbdev_cma->fb_helper.fbdev; - ret = unregister_framebuffer(info); - if (ret < 0) - DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); - - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - - framebuffer_release(info); - } + drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper); + drm_fb_helper_release_fbi(&fbdev_cma->fb_helper); if (fbdev_cma->fb) { drm_framebuffer_unregister_private(&fbdev_cma->fb->fb); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index cac422916..ca08c4723 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list); * Teardown is done with drm_fb_helper_fini(). * * At runtime drivers should restore the fbdev console by calling - * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They - * should also notify the fb helper code from updates to the output + * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback. + * They should also notify the fb helper code of updates to the output * configuration by calling drm_fb_helper_hotplug_event(). For easier * integration with the output polling code in drm_crtc_helper.c the modeset * code provides a ->output_poll_changed callback. @@ -89,8 +89,9 @@ static LIST_HEAD(kernel_fb_helper_list); * connectors to the fbdev, e.g. if some are reserved for special purposes or * not adequate to be used for the fbcon. * - * Since this is part of the initial setup before the fbdev is published, no - * locking is required. + * This function is protected against concurrent connector hotadds/removals + * using drm_fb_helper_add_one_connector() and + * drm_fb_helper_remove_one_connector().
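Because this hunk changes the locking contract of drm_fb_helper_single_add_all_connectors(), it helps to recall where the call sits in a driver's bring-up. A minimal sketch of the 4.3-era sequence follows; the foo_* names and the max_conn/bpp values are hypothetical, not taken from the patch.

	#include <drm/drm_fb_helper.h>

	static int foo_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
	{
		int ret;

		/* foo_fb_helper_funcs would supply the driver's ->fb_probe */
		drm_fb_helper_prepare(dev, helper, &foo_fb_helper_funcs);

		ret = drm_fb_helper_init(dev, helper,
					 dev->mode_config.num_crtc, 4 /* max_conn */);
		if (ret)
			return ret;

		/* takes mode_config.mutex internally as of the hunk above */
		ret = drm_fb_helper_single_add_all_connectors(helper);
		if (ret)
			goto err_fini;

		ret = drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
		if (ret)
			goto err_fini;

		return 0;

	err_fini:
		drm_fb_helper_fini(helper);
		return ret;
	}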
*/ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { @@ -98,7 +99,8 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) struct drm_connector *connector; int i; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + mutex_lock(&dev->mode_config.mutex); + drm_for_each_connector(connector, dev) { struct drm_fb_helper_connector *fb_helper_connector; fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); @@ -108,6 +110,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) fb_helper_connector->connector = connector; fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; } + mutex_unlock(&dev->mode_config.mutex); return 0; fail: for (i = 0; i < fb_helper->connector_count; i++) { @@ -115,6 +118,8 @@ fail: fb_helper->connector_info[i] = NULL; } fb_helper->connector_count = 0; + mutex_unlock(&dev->mode_config.mutex); + return -ENOMEM; } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); @@ -163,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set, } set->num_connectors--; - /* because i915 is pissy about this.. + /* * TODO maybe need to makes sure we set it back to !=NULL somewhere? */ - if (set->num_connectors == 0) + if (set->num_connectors == 0) { set->fb = NULL; + drm_mode_destroy(connector->dev, set->mode); + set->mode = NULL; + } } int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, @@ -269,7 +277,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_crtc *c; - list_for_each_entry(c, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(c, dev) { if (crtc->base.id == c->base.id) return c->primary->fb; } @@ -321,7 +329,7 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_warn_on_modeset_not_all_locked(dev); - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + drm_for_each_plane(plane, dev) { if (plane->type != DRM_PLANE_TYPE_PRIMARY) drm_plane_force_disable(plane); @@ -337,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; @@ -349,21 +361,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) } return error; } -/** - * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration - * @fb_helper: fbcon to restore - * - * This should be called from driver's drm ->lastclose callback - * when implementing an fbcon on top of kms using this helper. This ensures that - * the user isn't greeted with a black screen when e.g. X dies. - * - * Use this variant if you need to bypass locking (panic), or already - * hold all modeset locks. 
Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked() - */ -static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) -{ - return restore_fbdev_mode(fb_helper); -} /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration @@ -393,6 +390,31 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); +static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_crtc *crtc; + int bound = 0, crtcs_bound = 0; + + /* Sometimes user space wants everything disabled, so don't steal the + * display if there's a master. */ + if (dev->primary->master) + return false; + + drm_for_each_crtc(crtc, dev) { + if (crtc->primary->fb) + crtcs_bound++; + if (crtc->primary->fb == fb_helper->fb) + bound++; + } + + if (bound < crtcs_bound) + return false; + + return true; +} + +#ifdef CONFIG_MAGIC_SYSRQ /* * restore fbcon display for all kms driver's using this helper, used for sysrq * and panic handling. @@ -411,67 +433,15 @@ static bool drm_fb_helper_force_kernel_mode(void) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; - /* - * NOTE: Use trylock mode to avoid deadlocks and sleeping in - * panic context. - */ - if (__drm_modeset_lock_all(dev, true) != 0) { - error = true; - continue; - } - - ret = drm_fb_helper_restore_fbdev_mode(helper); + drm_modeset_lock_all(dev); + ret = restore_fbdev_mode(helper); if (ret) error = true; - drm_modeset_unlock_all(dev); } return error; } -static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, - void *panic_str) -{ - /* - * It's a waste of time and effort to switch back to text console - * if the kernel should reboot before panic messages can be seen. - */ - if (panic_timeout < 0) - return 0; - - pr_err("panic occurred, switching back to text console\n"); - return drm_fb_helper_force_kernel_mode(); -} - -static struct notifier_block paniced = { - .notifier_call = drm_fb_helper_panic, -}; - -static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) -{ - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; - int bound = 0, crtcs_bound = 0; - - /* Sometimes user space wants everything disabled, so don't steal the - * display if there's a master. */ - if (dev->primary->master) - return false; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->primary->fb) - crtcs_bound++; - if (crtc->primary->fb == fb_helper->fb) - bound++; - } - - if (bound < crtcs_bound) - return false; - - return true; -} - -#ifdef CONFIG_MAGIC_SYSRQ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { bool ret; @@ -503,14 +473,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) struct drm_connector *connector; int i, j; - /* - * fbdev->blank can be called from irq context in case of a panic. - * Since we already have our own special panic handler which will - * restore the fbdev console mode completely, just bail out early. - */ - if (oops_in_progress) - return; - /* * For each CRTC in this fb, turn the connectors on/off. 
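The surviving entry point for drivers is drm_fb_helper_restore_fbdev_mode_unlocked(), called from the ->lastclose hook as the comments above describe. A minimal sketch of such a hook, with the foo_* driver state a placeholder:

	#include <drm/drm_fb_helper.h>

	static void foo_lastclose(struct drm_device *dev)
	{
		struct foo_private *priv = dev->dev_private;	/* hypothetical */

		/* put fbcon back so the user isn't greeted with a black
		 * screen when the last DRM client (e.g. X) goes away */
		drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
	}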
*/ @@ -544,6 +506,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) */ int drm_fb_helper_blank(int blank, struct fb_info *info) { + if (oops_in_progress) + return -EBUSY; + switch (blank) { /* Display: On; HSync: On, VSync: On */ case FB_BLANK_UNBLANK: @@ -655,7 +620,7 @@ int drm_fb_helper_init(struct drm_device *dev, } i = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(crtc, dev) { fb_helper->crtc_info[i].mode_set.crtc = crtc; i++; } @@ -667,14 +632,91 @@ out_free: } EXPORT_SYMBOL(drm_fb_helper_init); +/** + * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members + * @fb_helper: driver-allocated fbdev helper + * + * A helper to alloc fb_info and the members cmap and apertures. Called + * by the driver within the fb_probe fb_helper callback function. + * + * RETURNS: + * fb_info pointer if things went okay, pointer containing error code + * otherwise + */ +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + struct device *dev = fb_helper->dev->dev; + struct fb_info *info; + int ret; + + info = framebuffer_alloc(0, dev); + if (!info) + return ERR_PTR(-ENOMEM); + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) + goto err_release; + + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto err_free_cmap; + } + + fb_helper->fbdev = info; + + return info; + +err_free_cmap: + fb_dealloc_cmap(&info->cmap); +err_release: + framebuffer_release(info); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); + +/** + * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device + * @fb_helper: driver-allocated fbdev helper + * + * A wrapper around unregister_framebuffer, to release the fb_info + * framebuffer device + */ +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper && fb_helper->fbdev) + unregister_framebuffer(fb_helper->fbdev); +} +EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); + +/** + * drm_fb_helper_release_fbi - dealloc fb_info and its members + * @fb_helper: driver-allocated fbdev helper + * + * A helper to free memory taken by fb_info and the members cmap and + * apertures + */ +void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper) { + struct fb_info *info = fb_helper->fbdev; + + if (info) { + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + fb_helper->fbdev = NULL; + } +} +EXPORT_SYMBOL(drm_fb_helper_release_fbi); + void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); if (list_empty(&kernel_fb_helper_list)) { - pr_info("drm: unregistered panic notifier\n"); - atomic_notifier_chain_unregister(&panic_notifier_list, - &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } } @@ -684,6 +726,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_fini); +/** + * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer + * @fb_helper: driver-allocated fbdev helper + * + * A wrapper around unlink_framebuffer implemented by fbdev core + */ +void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) +{ + if (fb_helper && fb_helper->fbdev) + unlink_framebuffer(fb_helper->fbdev); +} +EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); + +/** + * drm_fb_helper_sys_read - wrapper around fb_sys_read + * @info: fb_info struct pointer + * @buf: userspace buffer to read from framebuffer memory + * 
@count: number of bytes to read from framebuffer memory + * @ppos: read offset within framebuffer memory + * + * A wrapper around fb_sys_read implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + return fb_sys_read(info, buf, count, ppos); +} +EXPORT_SYMBOL(drm_fb_helper_sys_read); + +/** + * drm_fb_helper_sys_write - wrapper around fb_sys_write + * @info: fb_info struct pointer + * @buf: userspace buffer to write to framebuffer memory + * @count: number of bytes to write to framebuffer memory + * @ppos: write offset within framebuffer memory + * + * A wrapper around fb_sys_write implemented by fbdev core + */ +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + return fb_sys_write(info, buf, count, ppos); +} +EXPORT_SYMBOL(drm_fb_helper_sys_write); + +/** + * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect + * @info: fbdev registered by the helper + * @rect: info about rectangle to fill + * + * A wrapper around sys_fillrect implemented by fbdev core + */ +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + sys_fillrect(info, rect); +} +EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); + +/** + * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea + * @info: fbdev registered by the helper + * @area: info about area to copy + * + * A wrapper around sys_copyarea implemented by fbdev core + */ +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + sys_copyarea(info, area); +} +EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); + +/** + * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit + * @info: fbdev registered by the helper + * @image: info about image to blit + * + * A wrapper around sys_imageblit implemented by fbdev core + */ +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + sys_imageblit(info, image); +} +EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); + +/** + * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect + * @info: fbdev registered by the helper + * @rect: info about rectangle to fill + * + * A wrapper around cfb_fillrect implemented by fbdev core + */ +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + cfb_fillrect(info, rect); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); + +/** + * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea + * @info: fbdev registered by the helper + * @area: info about area to copy + * + * A wrapper around cfb_copyarea implemented by fbdev core + */ +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + cfb_copyarea(info, area); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); + +/** + * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit + * @info: fbdev registered by the helper + * @image: info about image to blit + * + * A wrapper around cfb_imageblit implemented by fbdev core + */ +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + cfb_imageblit(info, image); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); + +/** + * drm_fb_helper_set_suspend - wrapper around fb_set_suspend + * @fb_helper: driver-allocated fbdev helper + * @state: desired state, zero to resume, non-zero to suspend + * + * A wrapper around fb_set_suspend implemented by fbdev core + */ +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state) +{ + if
(fb_helper && fb_helper->fbdev) + fb_set_suspend(fb_helper->fbdev, state); +} +EXPORT_SYMBOL(drm_fb_helper_set_suspend); + static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { @@ -771,9 +956,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) int i, j, rc = 0; int start; - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + if (oops_in_progress) return -EBUSY; - } + + drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; @@ -922,6 +1108,9 @@ int drm_fb_helper_set_par(struct fb_info *info) struct drm_fb_helper *fb_helper = info->par; struct fb_var_screeninfo *var = &info->var; + if (oops_in_progress) + return -EBUSY; + if (var->pixclock != 0) { DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; @@ -947,9 +1136,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, int ret = 0; int i; - if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + if (oops_in_progress) return -EBUSY; - } + + drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; @@ -1109,12 +1299,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", info->node, info->fix.id); - /* Switch back to kernel console on panic */ - /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { - dev_info(fb_helper->dev->dev, "registered panic notifier\n"); - atomic_notifier_chain_register(&panic_notifier_list, - &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index e10dcfab1..bce1e3591 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref) struct drm_gem_object *obj = (struct drm_gem_object *) kref; struct drm_device *dev = obj->dev; - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); @@ -778,22 +778,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_reference(obj); - - mutex_lock(&obj->dev->struct_mutex); - drm_vm_open_locked(obj->dev, vma); - mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; - mutex_lock(&dev->struct_mutex); - drm_vm_close_locked(obj->dev, vma); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); } EXPORT_SYMBOL(drm_gem_vm_close); @@ -850,7 +842,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, */ drm_gem_object_reference(obj); - drm_vm_open_locked(dev, vma); return 0; } EXPORT_SYMBOL(drm_gem_mmap_obj); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index bd75f303d..86cc793cd 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, { struct drm_gem_object *gem_obj; - mutex_lock(&drm->struct_mutex); - gem_obj = drm_gem_object_lookup(drm, file_priv, handle); if (!gem_obj) { dev_err(drm->dev, "failed to lookup GEM object\n"); - 
mutex_unlock(&drm->struct_mutex); return -EINVAL; } *offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_unreference(gem_obj); - - mutex_unlock(&drm->struct_mutex); + drm_gem_object_unreference_unlocked(gem_obj); return 0; } @@ -381,11 +376,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) { struct drm_gem_object *obj = &cma_obj->base; - struct drm_device *dev = obj->dev; uint64_t off; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - off = drm_vma_node_start(&obj->vma_node); seq_printf(m, "%2d (%2d) %08llx %pad %p %zu", diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 9cfcd0aef..ddfa6014c 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -95,7 +95,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, return -EFAULT; version = compat_alloc_user_space(sizeof(*version)); - if (!access_ok(VERIFY_WRITE, version, sizeof(*version))) + if (!version) return -EFAULT; if (__put_user(v32.name_len, &version->name_len) || __put_user((void __user *)(unsigned long)v32.name, @@ -142,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, return -EFAULT; u = compat_alloc_user_space(sizeof(*u)); - if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) + if (!u) return -EFAULT; if (__put_user(uq32.unique_len, &u->unique_len) || __put_user((void __user *)(unsigned long)uq32.unique, @@ -170,7 +170,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, return -EFAULT; u = compat_alloc_user_space(sizeof(*u)); - if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) + if (!u) return -EFAULT; if (__put_user(uq32.unique_len, &u->unique_len) || __put_user((void __user *)(unsigned long)uq32.unique, @@ -202,7 +202,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, return -EFAULT; map = compat_alloc_user_space(sizeof(*map)); - if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + if (!map) return -EFAULT; if (__put_user(idx, &map->offset)) return -EFAULT; @@ -239,7 +239,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, return -EFAULT; map = compat_alloc_user_space(sizeof(*map)); - if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + if (!map) return -EFAULT; if (__put_user(m32.offset, &map->offset) || __put_user(m32.size, &map->size) @@ -279,7 +279,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, return -EFAULT; map = compat_alloc_user_space(sizeof(*map)); - if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) + if (!map) return -EFAULT; if (__put_user((void *)(unsigned long)handle, &map->handle)) return -EFAULT; @@ -308,7 +308,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, return -EFAULT; client = compat_alloc_user_space(sizeof(*client)); - if (!access_ok(VERIFY_WRITE, client, sizeof(*client))) + if (!client) return -EFAULT; if (__put_user(idx, &client->idx)) return -EFAULT; @@ -347,7 +347,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, int i, err; stats = compat_alloc_user_space(sizeof(*stats)); - if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) + if (!stats) return -EFAULT; err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats); @@ -384,8 +384,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long agp_start; buf = compat_alloc_user_space(sizeof(*buf)); - if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)) - || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) + if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) 
return -EFAULT; if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start)) @@ -416,7 +415,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, return -EFAULT; buf = compat_alloc_user_space(sizeof(*buf)); - if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))) + if (!buf) return -EFAULT; if (__put_user(b32.size, &buf->size) @@ -457,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); request = compat_alloc_user_space(nbytes); - if (!access_ok(VERIFY_WRITE, request, nbytes)) + if (!request) return -EFAULT; list = (struct drm_buf_desc *) (request + 1); @@ -518,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, return -EINVAL; nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); request = compat_alloc_user_space(nbytes); - if (!access_ok(VERIFY_WRITE, request, nbytes)) + if (!request) return -EFAULT; list = (struct drm_buf_pub *) (request + 1); @@ -565,7 +564,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + if (!request) return -EFAULT; if (__put_user(req32.count, &request->count) || __put_user((int __user *)(unsigned long)req32.list, @@ -591,7 +590,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + if (!request) return -EFAULT; if (__put_user(req32.ctx_id, &request->ctx_id) || __put_user((void *)(unsigned long)req32.handle, @@ -615,7 +614,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) + if (!request) return -EFAULT; if (__put_user(ctx_id, &request->ctx_id)) return -EFAULT; @@ -648,7 +647,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, return -EFAULT; res = compat_alloc_user_space(sizeof(*res)); - if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) + if (!res) return -EFAULT; if (__put_user(res32.count, &res->count) || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, @@ -691,7 +690,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, return -EFAULT; d = compat_alloc_user_space(sizeof(*d)); - if (!access_ok(VERIFY_WRITE, d, sizeof(*d))) + if (!d) return -EFAULT; if (__put_user(d32.context, &d->context) @@ -766,7 +765,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, int err; info = compat_alloc_user_space(sizeof(*info)); - if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) + if (!info) return -EFAULT; err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info); @@ -809,7 +808,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + if (!request || __put_user(req32.size, &request->size) || __put_user(req32.type, &request->type)) return -EFAULT; @@ -836,7 +835,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, u32 handle; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + if (!request || get_user(handle, &argp->handle) || __put_user(handle, &request->handle)) return -EFAULT; @@ 
-860,7 +859,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + if (!request || __put_user(req32.handle, &request->handle) || __put_user(req32.offset, &request->offset)) return -EFAULT; @@ -876,7 +875,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, u32 handle; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + if (!request || get_user(handle, &argp->handle) || __put_user(handle, &request->handle)) return -EFAULT; @@ -899,8 +898,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long x; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) + if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) || __get_user(x, &argp->size) || __put_user(x, &request->size)) return -EFAULT; @@ -925,8 +923,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long x; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) + if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) || __get_user(x, &argp->handle) || __put_user(x << PAGE_SHIFT, &request->handle)) return -EFAULT; @@ -954,7 +951,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || + if (!request || __put_user(update32.handle, &request->handle) || __put_user(update32.type, &request->type) || __put_user(update32.num, &request->num) || @@ -996,7 +993,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + if (!request || __put_user(req32.request.type, &request->request.type) || __put_user(req32.request.sequence, &request->request.sequence) || __put_user(req32.request.signal, &request->request.signal)) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index b1d303fa2..d93e7378c 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data, * indicated permissions. If so, returns zero. Otherwise returns an * error code suitable for ioctl return. 
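The export of drm_ioctl_permit() and the DRM_CONTROL_ALLOW flag added to DRM_IOCTL_VERSION below belong together: a control node is refused any ioctl that is not explicitly flagged for it. As an illustrative restatement of that one rule (paraphrased, not the verbatim kernel body; see drm_ioctl.c for the authoritative checks):

	#include <drm/drmP.h>

	/* a control-node client may only issue ioctls flagged DRM_CONTROL_ALLOW;
	 * paraphrased sketch, not the kernel's exact code */
	static bool foo_control_ioctl_allowed(u32 flags, struct drm_file *file_priv)
	{
		return !drm_is_control_client(file_priv) ||
		       (flags & DRM_CONTROL_ALLOW);
	}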
*/ -static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { /* ROOT_ONLY is only for CAP_SYS_ADMIN */ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) @@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return 0; } +EXPORT_SYMBOL(drm_ioctl_permit); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ [DRM_IOCTL_NR(ioctl)] = { \ @@ -519,7 +520,8 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, + DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b50fa0afd..22d207e21 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -43,8 +43,8 @@ #include /* Access macro for slots in vblank timestamp ringbuffer. */ -#define vblanktimestamp(dev, crtc, count) \ - ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE]) +#define vblanktimestamp(dev, pipe, count) \ + ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. @@ -57,7 +57,7 @@ #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 static bool -drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, struct timeval *tvblank, unsigned flags); static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ @@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc, /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device - * @crtc: counter to update + * @pipe: counter to update * * Call back into the driver to update the appropriate vblank counter * (specified by @crtc). Deal with wraparound, if it occurred, and @@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc, * Note: caller must hold dev->vbl_lock since this reads & writes * device vblank fields. */ -static void drm_update_vblank_count(struct drm_device *dev, int crtc) +static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 cur_vblank, diff; bool rc; struct timeval t_vblank; @@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * corresponding vblank timestamp. 
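The counter wraparound handling a few lines below ("Deal with counter wrap") is easiest to see with concrete numbers. A standalone illustration in plain C (illustrative values, not part of the patch): with a 24-bit hardware counter, i.e. max_vblank_count = 0xffffff, a wrap from 0xfffffe to 0x000001 still yields the expected three elapsed vblanks.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t max_vblank_count = 0xffffff;	/* 24-bit hw counter */
		uint32_t last = 0xfffffe, cur = 0x000001;
		uint32_t diff = cur - last;		/* wraps in 32 bits */

		if (cur < last)
			diff += max_vblank_count + 1;

		printf("diff = %u\n", diff);		/* prints: diff = 3 */
		return 0;
	}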
*/ do { - cur_vblank = dev->driver->get_vblank_counter(dev, crtc); - rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); - } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); + cur_vblank = dev->driver->get_vblank_counter(dev, pipe); + rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0); + } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe)); /* Deal with counter wrap */ diff = cur_vblank - vblank->last; if (cur_vblank < vblank->last) { diff += dev->max_vblank_count + 1; - DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", - crtc, vblank->last, cur_vblank, diff); + DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n", + pipe, vblank->last, cur_vblank, diff); } - DRM_DEBUG("updating vblank count on crtc %d, missed %d\n", - crtc, diff); + DRM_DEBUG("updating vblank count on crtc %u, missed %d\n", + pipe, diff); if (diff == 0) return; @@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) if (!rc) t_vblank = (struct timeval) {0, 0}; - store_vblank(dev, crtc, diff, &t_vblank); + store_vblank(dev, pipe, diff, &t_vblank); } /* @@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * are preserved, even if there are any spurious vblank irq's after * disable. */ -static void vblank_disable_and_save(struct drm_device *dev, int crtc) +static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; u32 vblcount; s64 diff_ns; @@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * vblank interrupt is disabled. */ if (!vblank->enabled && - drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) { - drm_update_vblank_count(dev, crtc); + drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) { + drm_update_vblank_count(dev, pipe); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); return; } @@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * hardware potentially runtime suspended. */ if (vblank->enabled) { - dev->driver->disable_vblank(dev, crtc); + dev->driver->disable_vblank(dev, pipe); vblank->enabled = false; } @@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * delayed gpu counter increment. */ do { - vblank->last = dev->driver->get_vblank_counter(dev, crtc); - vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); - } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); + vblank->last = dev->driver->get_vblank_counter(dev, pipe); + vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0); + } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc); if (!count) vblrc = 0; @@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) */ vblcount = vblank->count; diff_ns = timeval_to_ns(&tvblank) - - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); /* If there is at least 1 msec difference between the last stored * timestamp and tvblank, then we are currently executing our @@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * hope for the best. 
*/ if (vblrc && (abs64(diff_ns) > 1000000)) - store_vblank(dev, crtc, 1, &tvblank); + store_vblank(dev, pipe, 1, &tvblank); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } @@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg) { struct drm_vblank_crtc *vblank = (void *)arg; struct drm_device *dev = vblank->dev; + unsigned int pipe = vblank->pipe; unsigned long irqflags; - int crtc = vblank->crtc; if (!dev->vblank_disable_allowed) return; spin_lock_irqsave(&dev->vbl_lock, irqflags); if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { - DRM_DEBUG("disabling vblank on crtc %d\n", crtc); - vblank_disable_and_save(dev, crtc); + DRM_DEBUG("disabling vblank on crtc %u\n", pipe); + vblank_disable_and_save(dev, pipe); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg) */ void drm_vblank_cleanup(struct drm_device *dev) { - int crtc; + unsigned int pipe; /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) return; - for (crtc = 0; crtc < dev->num_crtcs; crtc++) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + for (pipe = 0; pipe < dev->num_crtcs; pipe++) { + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; WARN_ON(vblank->enabled && drm_core_check_feature(dev, DRIVER_MODESET)); @@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup); /** * drm_vblank_init - initialize vblank support - * @dev: drm_device - * @num_crtcs: number of crtcs supported by @dev + * @dev: DRM device + * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. * * Returns: * Zero on success or a negative error code on failure. */ -int drm_vblank_init(struct drm_device *dev, int num_crtcs) +int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) { - int i, ret = -ENOMEM; + int ret = -ENOMEM; + unsigned int i; spin_lock_init(&dev->vbl_lock); spin_lock_init(&dev->vblank_time_lock); @@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) struct drm_vblank_crtc *vblank = &dev->vblank[i]; vblank->dev = dev; - vblank->crtc = i; + vblank->pipe = i; init_waitqueue_head(&vblank->queue); setup_timer(&vblank->disable_timer, vblank_disable_fn, (unsigned long)vblank); @@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc, if (mode->flags & DRM_MODE_FLAG_INTERLACE) framedur_ns /= 2; } else - DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", + DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n", crtc->base.id); crtc->pixeldur_ns = pixeldur_ns; crtc->linedur_ns = linedur_ns; crtc->framedur_ns = framedur_ns; - DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", + DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", crtc->base.id, mode->crtc_htotal, mode->crtc_vtotal, mode->crtc_vdisplay); - DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", + DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n", crtc->base.id, dotclock, framedur_ns, linedur_ns, pixeldur_ns); } @@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); /** * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper * @dev: DRM device - * @crtc: Which CRTC's vblank timestamp to retrieve + * @pipe: index of CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: 
Pointer to struct timeval which should receive the timestamp @@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. * */ -int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, +int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + unsigned int pipe, int *max_error, struct timeval *vblank_time, unsigned flags, @@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; bool invbl; - if (crtc < 0 || crtc >= dev->num_crtcs) { - DRM_ERROR("Invalid crtc %d\n", crtc); + if (pipe >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } @@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, * Happens during initial modesetting of a crtc. */ if (framedur_ns == 0) { - DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); + DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe); return -EAGAIN; } @@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, * Get vertical and horizontal scanout position vpos, hpos, * and bounding timestamps stime, etime, pre/post query. */ - vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, + vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos, &hpos, &stime, &etime); /* Return as no-op if scanout query unsupported or failed. */ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { - DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", - crtc, vbl_status); + DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n", + pipe, vbl_status); return -EIO; } @@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, /* Noisy system timing? */ if (i == DRM_TIMESTAMP_MAXRETRIES) { - DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", - crtc, duration_ns/1000, *max_error/1000, i); + DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n", + pipe, duration_ns/1000, *max_error/1000, i); } /* Return upper bound of timestamp precision error. */ @@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); - DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", - crtc, (int)vbl_status, hpos, vpos, + DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", + pipe, (int)vbl_status, hpos, vpos, (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, duration_ns/1000, i); @@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void) * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent * vblank interval * @dev: DRM device - * @crtc: which CRTC's vblank timestamp to retrieve + * @pipe: index of CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target struct timeval which should receive the timestamp * @flags: Flags to pass to driver: * 0 = Default, @@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void) * True if timestamp is considered to be very precise, false otherwise. 
*/ static bool -drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, struct timeval *tvblank, unsigned flags) { int ret; @@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, /* Query driver if possible and precision timestamping enabled. */ if (dev->driver->get_vblank_timestamp && (max_error > 0)) { - ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, + ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error, tvblank, flags); if (ret > 0) return true; @@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, /** * drm_vblank_count - retrieve "cooked" vblank counter value * @dev: DRM device - * @crtc: which counter to retrieve + * @pipe: index of CRTC for which to retrieve the counter * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to @@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, * Returns: * The software vblank counter. */ -u32 drm_vblank_count(struct drm_device *dev, int crtc) +u32 drm_vblank_count(struct drm_device *dev, int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return 0; + return vblank->count; } EXPORT_SYMBOL(drm_vblank_count); @@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc) EXPORT_SYMBOL(drm_crtc_vblank_count); /** - * drm_vblank_count_and_time - retrieve "cooked" vblank counter value - * and the system timestamp corresponding to that vblank counter value. - * + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the + * system timestamp corresponding to that vblank counter value. * @dev: DRM device - * @crtc: which counter to retrieve + * @pipe: index of CRTC whose counter to retrieve * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of @@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count); * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. */ -u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, +u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 cur_vblank; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return 0; /* @@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, do { cur_vblank = vblank->count; smp_rmb(); - *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); + *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); smp_rmb(); } while (cur_vblank != vblank->count); @@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev, /** * drm_send_vblank_event - helper to send vblank event after pageflip * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * @e: the event to send * * Updates sequence # and timestamp on event, and sends it to userspace. @@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev, * * This is the legacy version of drm_crtc_send_vblank_event(). 
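A typical consumer of drm_send_vblank_event() is a driver's pageflip interrupt handler. A sketch of that pattern follows; the foo_* state and lookup helper are hypothetical placeholders.

	#include <drm/drmP.h>

	static void foo_pageflip_irq(struct drm_device *dev, unsigned int pipe)
	{
		struct foo_crtc *fcrtc = foo_crtc_for_pipe(dev, pipe);	/* hypothetical */
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		spin_lock_irqsave(&dev->event_lock, flags);
		if (fcrtc->flip_event) {
			/* stamps the event with the cooked count and timestamp */
			drm_send_vblank_event(dev, pipe, fcrtc->flip_event);
			fcrtc->flip_event = NULL;
			drm_vblank_put(dev, pipe);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}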
*/ -void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e) +void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e) { struct timeval now; unsigned int seq; - if (crtc >= 0) { - seq = drm_vblank_count_and_time(dev, crtc, &now); + if (dev->num_crtcs > 0) { + seq = drm_vblank_count_and_time(dev, pipe, &now); } else { seq = 0; now = get_drm_timestamp(); } - e->pipe = crtc; + e->pipe = pipe; send_vblank_event(dev, e, seq, &now); } EXPORT_SYMBOL(drm_send_vblank_event); @@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event); /** * drm_vblank_enable - enable the vblank interrupt on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index + * + * Returns: + * Zero on success or a negative error code on failure. */ -static int drm_vblank_enable(struct drm_device *dev, int crtc) +static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; int ret = 0; assert_spin_locked(&dev->vbl_lock); @@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) * timestamps. Filtercode in drm_handle_vblank() will * prevent double-accounting of same vblank interval. */ - ret = dev->driver->enable_vblank(dev, crtc); - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); + ret = dev->driver->enable_vblank(dev, pipe); + DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret); if (ret) atomic_dec(&vblank->refcount); else { vblank->enabled = true; - drm_update_vblank_count(dev, crtc); + drm_update_vblank_count(dev, pipe); } } @@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) /** * drm_vblank_get - get a reference count on vblank events * @dev: DRM device - * @crtc: which CRTC to own + * @pipe: index of CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled * while in use. @@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc) * This is the legacy version of drm_crtc_vblank_get(). * * Returns: - * Zero on success, nonzero on failure. + * Zero on success or a negative error code on failure. */ -int drm_vblank_get(struct drm_device *dev, int crtc) +int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; int ret = 0; if (!dev->num_crtcs) return -EINVAL; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return -EINVAL; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &vblank->refcount) == 1) { - ret = drm_vblank_enable(dev, crtc); + ret = drm_vblank_enable(dev, pipe); } else { if (!vblank->enabled) { atomic_dec(&vblank->refcount); @@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get); * This is the native kms version of drm_vblank_get(). * * Returns: - * Zero on success, nonzero on failure. + * Zero on success or a negative error code on failure. 
*/ int drm_crtc_vblank_get(struct drm_crtc *crtc) { @@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc) EXPORT_SYMBOL(drm_crtc_vblank_get); /** - * drm_vblank_put - give up ownership of vblank events + * drm_vblank_put - release ownership of vblank events * @dev: DRM device - * @crtc: which counter to give up + * @pipe: index of CRTC to release * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. * * This is the legacy version of drm_crtc_vblank_put(). */ -void drm_vblank_put(struct drm_device *dev, int crtc) +void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - if (WARN_ON(atomic_read(&vblank->refcount) == 0)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(atomic_read(&vblank->refcount) == 0)) return; /* Last user schedules interrupt disable */ @@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put); /** * drm_wait_one_vblank - wait for one vblank * @dev: DRM device - * @crtc: crtc index + * @pipe: CRTC index * * This waits for one vblank to pass on @crtc, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. * due to lack of driver support or because the crtc is off. */ -void drm_wait_one_vblank(struct drm_device *dev, int crtc) +void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; int ret; u32 last; - ret = drm_vblank_get(dev, crtc); - if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret)) + if (WARN_ON(pipe >= dev->num_crtcs)) + return; + + ret = drm_vblank_get(dev, pipe); + if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret)) return; - last = drm_vblank_count(dev, crtc); + last = drm_vblank_count(dev, pipe); - ret = wait_event_timeout(dev->vblank[crtc].queue, - last != drm_vblank_count(dev, crtc), + ret = wait_event_timeout(vblank->queue, + last != drm_vblank_count(dev, pipe), msecs_to_jiffies(100)); - WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc); + WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe); - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); } EXPORT_SYMBOL(drm_wait_one_vblank); @@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank); /** * drm_vblank_off - disable vblank events on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * Drivers can use this function to shut down the vblank interrupt handling when * disabling a crtc. This function ensures that the latest vblank frame count is @@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank); * * This is the legacy version of drm_crtc_vblank_off(). 
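/*
 * Editor's note (not part of the patch): swapping the two WARN_ON()s in
 * drm_vblank_put() above is not cosmetic - the range check must run before
 * atomic_read() touches dev->vblank[pipe], otherwise an out-of-range index
 * reads past the end of the array. drm_wait_one_vblank() bundles the
 * get/wait/put sequence; a hypothetical use, e.g. before programming
 * registers that latch on the next frame:
 */
static void example_sync_to_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_wait_one_vblank(dev, pipe);	/* vblank IRQ must be available */
	/* ... write double-buffered registers here ... */
}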
*/ -void drm_vblank_off(struct drm_device *dev, int crtc) +void drm_vblank_off(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long irqflags; unsigned int seq; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; spin_lock_irqsave(&dev->event_lock, irqflags); spin_lock(&dev->vbl_lock); - vblank_disable_and_save(dev, crtc); + vblank_disable_and_save(dev, pipe); wake_up(&vblank->queue); /* @@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc) spin_unlock(&dev->vbl_lock); /* Send any queued vblank events, lest the natives grow disquiet */ - seq = drm_vblank_count_and_time(dev, crtc, &now); + seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { - if (e->pipe != crtc) + if (e->pipe != pipe) continue; DRM_DEBUG("Sending premature vblank event on disable: \ wanted %d, current %d\n", e->event.sequence, seq); list_del(&e->base.link); - drm_vblank_put(dev, e->pipe); + drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, &now); } spin_unlock_irqrestore(&dev->event_lock, irqflags); @@ -1267,7 +1276,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off); /** * drm_crtc_vblank_reset - reset vblank state to off on a CRTC - * @crtc: CRTC in question + * @drm_crtc: CRTC in question * * Drivers can use this function to reset the vblank state to off at load time. * Drivers should use this together with the drm_crtc_vblank_off() and @@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset); /** * drm_vblank_on - enable vblank events on a CRTC * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * This functions restores the vblank interrupt state captured with * drm_vblank_off() again. Note that calls to drm_vblank_on() and @@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset); * * This is the legacy version of drm_crtc_vblank_on(). */ -void drm_vblank_on(struct drm_device *dev, int crtc) +void drm_vblank_on(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc) * vblank counter value before and after a modeset */ vblank->last = - (dev->driver->get_vblank_counter(dev, crtc) - 1) & + (dev->driver->get_vblank_counter(dev, pipe) - 1) & dev->max_vblank_count; /* * re-enable interrupts if there are users left, or the @@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc) */ if (atomic_read(&vblank->refcount) != 0 || (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0)) - WARN_ON(drm_vblank_enable(dev, crtc)); + WARN_ON(drm_vblank_enable(dev, pipe)); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_on); @@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on); /** * drm_vblank_pre_modeset - account for vblanks across mode sets * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * Account for vblank events across mode setting events, which will likely * reset the hardware frame counter. 
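/*
 * Editor's note (illustrative userspace sketch, not part of the patch;
 * assumes the installed DRM uapi headers): the legacy bracket served by
 * drm_modeset_ctl() further below. Between the two calls the kernel holds a
 * vblank reference - the 0x2 bit of ->inmodeset - so the cooked count
 * survives a hardware frame-counter reset.
 */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_modeset_bracket(int fd, unsigned int pipe, int pre)
{
	struct drm_modeset_ctl ctl = {
		.crtc = pipe,
		.cmd = pre ? _DRM_PRE_MODESET : _DRM_POST_MODESET,
	};

	return ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
}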
@@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on); * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc * again. */ -void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) +void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return; /* @@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) */ if (!vblank->inmodeset) { vblank->inmodeset = 0x1; - if (drm_vblank_get(dev, crtc) == 0) + if (drm_vblank_get(dev, pipe) == 0) vblank->inmodeset |= 0x2; } } @@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset); /** * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes * @dev: DRM device - * @crtc: CRTC in question + * @pipe: CRTC index * * This function again drops the temporary vblank reference acquired in * drm_vblank_pre_modeset. */ -void drm_vblank_post_modeset(struct drm_device *dev, int crtc) +void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; + if (WARN_ON(pipe >= dev->num_crtcs)) + return; + if (vblank->inmodeset) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = true; spin_unlock_irqrestore(&dev->vbl_lock, irqflags); if (vblank->inmodeset & 0x2) - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); vblank->inmodeset = 0; } @@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - unsigned int crtc; + unsigned int pipe; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) @@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - crtc = modeset->crtc; - if (crtc >= dev->num_crtcs) + pipe = modeset->crtc; + if (pipe >= dev->num_crtcs) return -EINVAL; switch (modeset->cmd) { case _DRM_PRE_MODESET: - drm_vblank_pre_modeset(dev, crtc); + drm_vblank_pre_modeset(dev, pipe); break; case _DRM_POST_MODESET: - drm_vblank_post_modeset(dev, crtc); + drm_vblank_post_modeset(dev, pipe); break; default: return -EINVAL; @@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, return 0; } -static int drm_queue_vblank_event(struct drm_device *dev, int pipe, +static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, union drm_wait_vblank *vblwait, struct drm_file *file_priv) { @@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, vblwait->reply.sequence = vblwait->request.sequence; } - DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", + DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", vblwait->request.sequence, seq, pipe); trace_drm_vblank_event_queued(current->pid, pipe, @@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; int ret; - unsigned int flags, seq, crtc, high_crtc; + unsigned int flags, seq, pipe, high_pipe; if (!dev->irq_enabled) 
return -EINVAL; @@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data, } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; - high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); - if (high_crtc) - crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; + high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); + if (high_pipe) + pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT; else - crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; - if (crtc >= dev->num_crtcs) + pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; + if (pipe >= dev->num_crtcs) return -EINVAL; - vblank = &dev->vblank[crtc]; + vblank = &dev->vblank[pipe]; - ret = drm_vblank_get(dev, crtc); + ret = drm_vblank_get(dev, pipe); if (ret) { DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); return ret; } - seq = drm_vblank_count(dev, crtc); + seq = drm_vblank_count(dev, pipe); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: @@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ - return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); } if ((flags & _DRM_VBLANK_NEXTONMISS) && @@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, vblwait->request.sequence = seq + 1; } - DRM_DEBUG("waiting on vblank count %d, crtc %d\n", - vblwait->request.sequence, crtc); + DRM_DEBUG("waiting on vblank count %d, crtc %u\n", + vblwait->request.sequence, pipe); vblank->last_wait = vblwait->request.sequence; DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, - (((drm_vblank_count(dev, crtc) - + (((drm_vblank_count(dev, pipe) - vblwait->request.sequence) <= (1 << 23)) || !vblank->enabled || !dev->irq_enabled)); @@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (ret != -EINTR) { struct timeval now; - vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); + vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; @@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, } done: - drm_vblank_put(dev, crtc); + drm_vblank_put(dev, pipe); return ret; } -static void drm_handle_vblank_events(struct drm_device *dev, int crtc) +static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) { struct drm_pending_vblank_event *e, *t; struct timeval now; @@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) assert_spin_locked(&dev->event_lock); - seq = drm_vblank_count_and_time(dev, crtc, &now); + seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { - if (e->pipe != crtc) + if (e->pipe != pipe) continue; if ((seq - e->event.sequence) > (1<<23)) continue; @@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) e->event.sequence, seq); list_del(&e->base.link); - drm_vblank_put(dev, e->pipe); + drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, &now); } - trace_drm_vblank_event(crtc, seq); + trace_drm_vblank_event(pipe, seq); } /** * drm_handle_vblank - handle a vblank event * @dev: DRM device - * @crtc: where this event occurred + * @pipe: index of CRTC where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * 
update the vblank counter and send any signals that may be pending. * * This is the legacy version of drm_crtc_handle_vblank(). */ -bool drm_handle_vblank(struct drm_device *dev, int crtc) +bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) { - struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 vblcount; s64 diff_ns; struct timeval tvblank; @@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) if (WARN_ON_ONCE(!dev->num_crtcs)) return false; - if (WARN_ON(crtc >= dev->num_crtcs)) + if (WARN_ON(pipe >= dev->num_crtcs)) return false; spin_lock_irqsave(&dev->event_lock, irqflags); @@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) /* Get current timestamp and count. */ vblcount = vblank->count; - drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); + drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ); /* Compute time difference to timestamp of last vblank */ diff_ns = timeval_to_ns(&tvblank) - - timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount)); /* Update vblank timestamp and count if at least * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds @@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) * ignore those for accounting. */ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) - store_vblank(dev, crtc, 1, &tvblank); + store_vblank(dev, pipe, 1, &tvblank); else - DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", - crtc, (int) diff_ns); + DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n", + pipe, (int) diff_ns); spin_unlock(&dev->vblank_time_lock); wake_up(&vblank->queue); - drm_handle_vblank_events(dev, crtc); + drm_handle_vblank_events(dev, pipe); spin_unlock_irqrestore(&dev->event_lock, irqflags); diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index c1dc61473..9b731786e 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -42,7 +42,7 @@ struct drm_file; #define DRM_KERNEL_CONTEXT 0 #define DRM_RESERVED_CONTEXTS 1 -int drm_legacy_ctxbitmap_init(struct drm_device *dev); +void drm_legacy_ctxbitmap_init(struct drm_device *dev); void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index c0a5cd8c5..fba321ca4 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -55,41 +55,27 @@ * drm_modeset_acquire_fini(&ctx); */ - /** - * __drm_modeset_lock_all - internal helper to grab all modeset locks - * @dev: DRM device - * @trylock: trylock mode for atomic contexts - * - * This is a special version of drm_modeset_lock_all() which can also be used in - * atomic contexts. Then @trylock must be set to true. + * drm_modeset_lock_all - take all modeset locks + * @dev: drm device * - * Returns: - * 0 on success or negative error code on failure. + * This function takes all modeset locks, suitable where a more fine-grained + * scheme isn't (yet) implemented. Locks must be dropped with + * drm_modeset_unlock_all. 
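/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * retry:/-EDEADLK/fail: structure above is the standard ww-mutex backoff
 * dance. A driver taking a single lock under an acquire context follows the
 * same shape:
 */
static int example_lock_crtc(struct drm_crtc *crtc,
			     struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);	/* drop held locks, then block */
		goto retry;
	}
	return ret;
}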
*/ -int __drm_modeset_lock_all(struct drm_device *dev, - bool trylock) +void drm_modeset_lock_all(struct drm_device *dev) { struct drm_mode_config *config = &dev->mode_config; struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), - trylock ? GFP_ATOMIC : GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (WARN_ON(!ctx)) + return; - if (trylock) { - if (!mutex_trylock(&config->mutex)) { - ret = -EBUSY; - goto out; - } - } else { - mutex_lock(&config->mutex); - } + mutex_lock(&config->mutex); drm_modeset_acquire_init(ctx, 0); - ctx->trylock_only = trylock; retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); @@ -108,7 +94,7 @@ retry: drm_warn_on_modeset_not_all_locked(dev); - return 0; + return; fail: if (ret == -EDEADLK) { @@ -116,23 +102,7 @@ fail: goto retry; } -out: kfree(ctx); - return ret; -} -EXPORT_SYMBOL(__drm_modeset_lock_all); - -/** - * drm_modeset_lock_all - take all modeset locks - * @dev: drm device - * - * This function takes all modeset locks, suitable where a more fine-grained - * scheme isn't (yet) implemented. Locks must be dropped with - * drm_modeset_unlock_all. - */ -void drm_modeset_lock_all(struct drm_device *dev) -{ - WARN_ON(__drm_modeset_lock_all(dev, false) != 0); } EXPORT_SYMBOL(drm_modeset_lock_all); @@ -276,7 +246,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) if (oops_in_progress) return; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + drm_for_each_crtc(crtc, dev) WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); @@ -464,18 +434,17 @@ EXPORT_SYMBOL(drm_modeset_unlock); int drm_modeset_lock_all_crtcs(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { - struct drm_mode_config *config = &dev->mode_config; struct drm_crtc *crtc; struct drm_plane *plane; int ret = 0; - list_for_each_entry(crtc, &config->crtc_list, head) { + drm_for_each_crtc(crtc, dev) { ret = drm_modeset_lock(&crtc->mutex, ctx); if (ret) return ret; } - list_for_each_entry(plane, &config->plane_list, head) { + drm_for_each_plane(plane, dev) { ret = drm_modeset_lock(&plane->mutex, ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index aaa130736..be3884073 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -19,7 +19,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev, unsigned int index = 0; struct drm_crtc *tmp; - list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { + drm_for_each_crtc(tmp, dev) { if (tmp->port == port) return 1 << index; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 2f0ed1102..5e5a07af0 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -91,13 +91,14 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, */ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) { if (connector->encoder && connector->encoder->crtc == crtc) { if (connector_list != NULL && count < num_connectors) *(connector_list++) = connector; count++; } + } return count; } @@ -436,7 +437,7 @@ int drm_plane_helper_commit(struct drm_plane *plane, for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin) - crtc_funcs[i]->atomic_begin(crtc[i]); + crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state); } /* @@ -451,7 
+452,7 @@ int drm_plane_helper_commit(struct drm_plane *plane, for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush) - crtc_funcs[i]->atomic_flush(crtc[i]); + crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state); } /* @@ -525,10 +526,12 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, if (plane->funcs->atomic_duplicate_state) plane_state = plane->funcs->atomic_duplicate_state(plane); - else if (plane->state) + else { + if (!plane->state) + drm_atomic_helper_plane_reset(plane); + plane_state = drm_atomic_helper_plane_duplicate_state(plane); - else - plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); + } if (!plane_state) return -ENOMEM; plane_state->plane = plane; @@ -572,10 +575,12 @@ int drm_plane_helper_disable(struct drm_plane *plane) if (plane->funcs->atomic_duplicate_state) plane_state = plane->funcs->atomic_duplicate_state(plane); - else if (plane->state) + else { + if (!plane->state) + drm_atomic_helper_plane_reset(plane); + plane_state = drm_atomic_helper_plane_duplicate_state(plane); - else - plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); + } if (!plane_state) return -ENOMEM; plane_state->plane = plane; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 04203c0d2..a18164f2f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -93,6 +93,40 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) return 1; } +#define DRM_OUTPUT_POLL_PERIOD (10*HZ) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. + */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) +{ + bool poll = false; + struct drm_connector *connector; + + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) + return; + + drm_for_each_connector(connector, dev) { + if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT)) + poll = true; + } + + if (poll) + schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); +} +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) { @@ -153,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -295,7 +329,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_hotplug_event); -#define DRM_OUTPUT_POLL_PERIOD (10*HZ) static void output_poll_execute(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); @@ -312,7 +345,7 @@ static void output_poll_execute(struct work_struct *work) goto out; mutex_lock(&dev->mode_config.mutex); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { /* Ignore forced connectors. 
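/*
 * Editor's note (illustrative sketch, not part of the patch): the new
 * drm_kms_helper_poll_enable_locked() above follows the usual _locked
 * naming convention - it asserts that mode_config.mutex is already held,
 * while the plain variant takes the mutex and delegates, which is exactly
 * how drm_kms_helper_poll_enable() is reworked further below:
 */
static void example_enable_polling(struct drm_device *dev)
{
	mutex_lock(&dev->mode_config.mutex);
	drm_kms_helper_poll_enable_locked(dev);
	mutex_unlock(&dev->mode_config.mutex);
}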
*/ if (connector->force) @@ -407,20 +440,9 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); */ void drm_kms_helper_poll_enable(struct drm_device *dev) { - bool poll = false; - struct drm_connector *connector; - - if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) - return; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT)) - poll = true; - } - - if (poll) - schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); + mutex_lock(&dev->mode_config.mutex); + drm_kms_helper_poll_enable_locked(dev); + mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); @@ -495,7 +517,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) return false; mutex_lock(&dev->mode_config.mutex); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector(connector, dev) { /* Only handle HPD capable connectors. */ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 43003c4ad..bd1a4156f 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -56,7 +56,7 @@ config DRM_EXYNOS_DSI config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) default DRM_EXYNOS select DRM_PANEL help @@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI config DRM_EXYNOS_G2D bool "Exynos DRM G2D" depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D + select FRAME_VECTOR help Choose this option if you want to use Exynos G2D for DRM. diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 7de0b1084..02aecfed6 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -3,10 +3,9 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos -exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \ - exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ - exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ - exynos_drm_plane.o exynos_drm_dmabuf.o +exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ + exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \ + exynos_drm_plane.o exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 8b1225f24..b3c730770 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -54,6 +54,13 @@ static const char * const decon_clks_name[] = { "sclk_decon_eclk", }; +static const uint32_t decon_formats[] = { + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + static int decon_enable_vblank(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -152,15 +159,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc) #define OFFSIZE(x) (((x) & 0x3fff) << 14) #define PAGEWIDTH(x) ((x) & 0x3fff) -static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) +static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, + struct drm_framebuffer *fb) { - struct exynos_drm_plane *plane = &ctx->planes[win]; unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_XRGB1555: val |= WINCONx_BPPMODE_16BPP_I1555; val |= WINCONx_HAWSWP_F; @@ -186,7 +193,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) return; } - DRM_DEBUG_KMS("bpp = %u\n", plane->bpp); + DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -196,7 +203,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) * movement causes unstable DMA which results into iommu crash/tear. 
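/*
 * Editor's note (not part of the patch): decon_formats[] above is the
 * plane's advertised format list, handed to exynos_plane_init() together
 * with ARRAY_SIZE(decon_formats) in a later hunk. A hypothetical helper
 * showing how such a table is typically consulted:
 */
static bool example_format_supported(uint32_t fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(decon_formats); i++)
		if (decon_formats[i] == fourcc)
			return true;	/* e.g. DRM_FORMAT_XRGB8888 */
	return false;
}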
*/ - if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { + if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_8WORD; } @@ -219,27 +226,35 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win, writel(val, ctx->addr + DECON_SHADOWCON); } -static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_atomic_begin(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; - u32 val; - if (win < 0 || win >= WINDOWS_NR) + if (ctx->suspended) return; - plane = &ctx->planes[win]; + decon_shadow_protect_win(ctx, plane->zpos, true); +} + +static void decon_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) +{ + struct decon_context *ctx = crtc->ctx; + struct drm_plane_state *state = plane->base.state; + unsigned int win = plane->zpos; + unsigned int bpp = state->fb->bits_per_pixel >> 3; + unsigned int pitch = state->fb->pitches[0]; + u32 val; if (ctx->suspended) return; - decon_shadow_protect_win(ctx, win, true); - val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y); writel(val, ctx->addr + DECON_VIDOSDxA(win)); - val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) | - COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1); + val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) | + COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1); writel(val, ctx->addr + DECON_VIDOSDxB(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | @@ -252,42 +267,33 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win)); - val = plane->dma_addr[0] + plane->pitch * plane->crtc_height; + val = plane->dma_addr[0] + pitch * plane->crtc_h; writel(val, ctx->addr + DECON_VIDW0xADD1B0(win)); - val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3)) - | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3)); + val = OFFSIZE(pitch - plane->crtc_w * bpp) + | PAGEWIDTH(plane->crtc_w * bpp); writel(val, ctx->addr + DECON_VIDW0xADD2(win)); - decon_win_set_pixfmt(ctx, win); + decon_win_set_pixfmt(ctx, win, state->fb); /* window enable */ val = readl(ctx->addr + DECON_WINCONx(win)); val |= WINCONx_ENWIN_F; writel(val, ctx->addr + DECON_WINCONx(win)); - decon_shadow_protect_win(ctx, win, false); - /* standalone update */ val = readl(ctx->addr + DECON_UPDATE); val |= STANDALONE_UPDATE_F; writel(val, ctx->addr + DECON_UPDATE); - - if (ctx->i80_if) - atomic_set(&ctx->win_updated, 1); } -static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + unsigned int win = plane->zpos; u32 val; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - if (ctx->suspended) return; @@ -306,6 +312,20 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) writel(val, ctx->addr + DECON_UPDATE); } +static void decon_atomic_flush(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) +{ + struct decon_context *ctx = crtc->ctx; + + if (ctx->suspended) + return; + + decon_shadow_protect_win(ctx, plane->zpos, false); + + if (ctx->i80_if) + atomic_set(&ctx->win_updated, 1); +} + static void decon_swreset(struct decon_context *ctx) { unsigned int tries; @@ -378,7 +398,7 @@ static void 
decon_disable(struct exynos_drm_crtc *crtc) * a destroyed buffer later. */ for (i = 0; i < WINDOWS_NR; i++) - decon_win_disable(crtc, i); + decon_disable_plane(crtc, &ctx->planes[i]); decon_swreset(ctx); @@ -407,7 +427,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc) writel(val, ctx->addr + DECON_TRIGCON); } - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); } static void decon_clear_channels(struct exynos_drm_crtc *crtc) @@ -460,10 +480,11 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = { .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .commit = decon_commit, - .win_commit = decon_win_commit, - .win_disable = decon_win_disable, + .atomic_begin = decon_atomic_begin, + .update_plane = decon_update_plane, + .disable_plane = decon_disable_plane, + .atomic_flush = decon_atomic_flush, .te_handler = decon_te_irq_handler, - .clear_channels = decon_clear_channels, }; static int decon_bind(struct device *dev, struct device *master, void *data) @@ -483,7 +504,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data) type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, zpos); + 1 << ctx->pipe, type, decon_formats, + ARRAY_SIZE(decon_formats), zpos); if (ret) return ret; } @@ -497,7 +519,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data) goto err; } - ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev); + decon_clear_channels(ctx->crtc); + + ret = drm_iommu_attach_device(drm_dev, dev); if (ret) goto err; @@ -514,8 +538,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) decon_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. */ - if (is_drm_iommu_supported(ctx->drm_dev)) - drm_iommu_detach_device(ctx->drm_dev, ctx->dev); + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); } static const struct component_ops decon_component_ops = { @@ -533,7 +556,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id) val = readl(ctx->addr + DECON_VIDINTCON1); if (val & VIDINTCON1_INTFRMPEND) { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); /* clear */ writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1); @@ -547,13 +570,21 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = dev_id; u32 val; + int win; if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled)) goto out; val = readl(ctx->addr + DECON_VIDINTCON1); if (val & VIDINTCON1_INTFRMDONEPEND) { - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + for (win = 0 ; win < WINDOWS_NR ; win++) { + struct exynos_drm_plane *plane = &ctx->planes[win]; + + if (!plane->pending_fb) + continue; + + exynos_drm_crtc_finish_update(ctx->crtc, plane); + } /* clear */ writel(VIDINTCON1_INTFRMDONEPEND, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 362532afd..e6cbaca82 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. 
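/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * win_commit/win_disable to atomic_begin/update_plane/disable_plane/
 * atomic_flush split above mirrors the atomic helper flow; per plane the
 * exynos CRTC layer now effectively runs the hooks in this order:
 */
static void example_commit_one_plane(struct exynos_drm_crtc *exynos_crtc,
				     struct exynos_drm_plane *plane)
{
	const struct exynos_drm_crtc_ops *ops = exynos_crtc->ops;

	ops->atomic_begin(exynos_crtc, plane);	/* shadow-protect the window */
	ops->update_plane(exynos_crtc, plane);	/* address, geometry, format */
	ops->atomic_flush(exynos_crtc, plane);	/* unprotect: registers latch
						 * atomically at vblank */
}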
*/ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -61,7 +60,7 @@ struct decon_context { atomic_t wait_vsync_event; struct exynos_drm_panel_info panel; - struct exynos_drm_display *display; + struct drm_encoder *encoder; }; static const struct of_device_id decon_driver_dt_match[] = { @@ -70,6 +69,18 @@ static const struct of_device_id decon_driver_dt_match[] = { }; MODULE_DEVICE_TABLE(of, decon_driver_dt_match); +static const uint32_t decon_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, +}; + static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -126,7 +137,9 @@ static int decon_ctx_initialize(struct decon_context *ctx, ctx->drm_dev = drm_dev; ctx->pipe = priv->pipe++; - ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev); + decon_clear_channels(ctx->crtc); + + ret = drm_iommu_attach_device(drm_dev, ctx->dev); if (ret) priv->pipe--; @@ -136,8 +149,7 @@ static int decon_ctx_initialize(struct decon_context *ctx, static void decon_ctx_remove(struct decon_context *ctx) { /* detach this sub driver from iommu mapping if supported. */ - if (is_drm_iommu_supported(ctx->drm_dev)) - drm_iommu_detach_device(ctx->drm_dev, ctx->dev); + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); } static u32 decon_calc_clkdiv(struct decon_context *ctx, @@ -152,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -271,16 +273,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) } } -static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) +static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, + struct drm_framebuffer *fb) { - struct exynos_drm_plane *plane = &ctx->planes[win]; unsigned long val; int padding; val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (plane->pixel_format) { + switch (fb->pixel_format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; val |= WINCONx_BURSTLEN_16WORD; @@ -329,7 +331,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) break; } - DRM_DEBUG_KMS("bpp = %d\n", plane->bpp); + DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -339,8 +341,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) * movement causes unstable DMA which results into iommu crash/tear. 
*/ - padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; - if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { + padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width; + if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_8WORD; } @@ -382,23 +384,30 @@ static void decon_shadow_protect_win(struct decon_context *ctx, writel(val, ctx->regs + SHADOWCON); } -static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_atomic_begin(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; - struct exynos_drm_plane *plane; - int padding; - unsigned long val, alpha; - unsigned int last_x; - unsigned int last_y; if (ctx->suspended) return; - if (win < 0 || win >= WINDOWS_NR) - return; + decon_shadow_protect_win(ctx, plane->zpos, true); +} - plane = &ctx->planes[win]; +static void decon_update_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) +{ + struct decon_context *ctx = crtc->ctx; + struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; + struct drm_plane_state *state = plane->base.state; + int padding; + unsigned long val, alpha; + unsigned int last_x; + unsigned int last_y; + unsigned int win = plane->zpos; + unsigned int bpp = state->fb->bits_per_pixel >> 3; + unsigned int pitch = state->fb->pitches[0]; if (ctx->suspended) return; @@ -413,18 +422,15 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) * is set. */ - /* protect windows */ - decon_shadow_protect_win(ctx, win, true); - /* buffer start address */ val = (unsigned long)plane->dma_addr[0]; writel(val, ctx->regs + VIDW_BUF_START(win)); - padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; + padding = (pitch / bpp) - state->fb->width; /* buffer size */ - writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win)); - writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win)); + writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win)); + writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win)); /* offset from the start of the buffer to read */ writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); @@ -433,25 +439,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) DRM_DEBUG_KMS("start addr = 0x%lx\n", (unsigned long)val); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", - plane->crtc_width, plane->crtc_height); + plane->crtc_w, plane->crtc_h); /* * OSD position. * In case the window layout goes of LCD layout, DECON fails. 
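/*
 * Editor's note (worked example, not part of the patch): for an XRGB8888
 * framebuffer (bits_per_pixel = 32, so bpp = 4 bytes) with width = 1920 and
 * pitches[0] = 7936, the math above gives padding = 7936 / 4 - 1920 = 64
 * pixels; width + padding = 1984 is then checked against
 * MIN_FB_WIDTH_FOR_16WORD_BURST (128) to decide whether the 16-word DMA
 * burst is safe.
 */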
*/ - if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay) - plane->crtc_x = mode->hdisplay - plane->crtc_width; - if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay) - plane->crtc_y = mode->vdisplay - plane->crtc_height; + if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay) + plane->crtc_x = mode->hdisplay - plane->crtc_w; + if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay) + plane->crtc_y = mode->vdisplay - plane->crtc_h; val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) | VIDOSDxA_TOPLEFT_Y(plane->crtc_y); writel(val, ctx->regs + VIDOSD_A(win)); - last_x = plane->crtc_x + plane->crtc_width; + last_x = plane->crtc_x + plane->crtc_w; if (last_x) last_x--; - last_y = plane->crtc_y + plane->crtc_height; + last_y = plane->crtc_y + plane->crtc_h; if (last_y) last_y--; @@ -475,7 +481,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(alpha, ctx->regs + VIDOSD_D(win)); - decon_win_set_pixfmt(ctx, win); + decon_win_set_pixfmt(ctx, win, state->fb); /* hardware window 0 doesn't support color key. */ if (win != 0) @@ -495,17 +501,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win) writel(val, ctx->regs + DECON_UPDATE); } -static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) +static void decon_disable_plane(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) { struct decon_context *ctx = crtc->ctx; - struct exynos_drm_plane *plane; + unsigned int win = plane->zpos; u32 val; - if (win < 0 || win >= WINDOWS_NR) - return; - - plane = &ctx->planes[win]; - if (ctx->suspended) return; @@ -517,14 +519,22 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win) val &= ~WINCONx_ENWIN; writel(val, ctx->regs + WINCON(win)); - /* unprotect windows */ - decon_shadow_protect_win(ctx, win, false); - val = readl(ctx->regs + DECON_UPDATE); val |= DECON_UPDATE_STANDALONE_F; writel(val, ctx->regs + DECON_UPDATE); } +static void decon_atomic_flush(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) +{ + struct decon_context *ctx = crtc->ctx; + + if (ctx->suspended) + return; + + decon_shadow_protect_win(ctx, plane->zpos, false); +} + static void decon_init(struct decon_context *ctx) { u32 val; @@ -601,7 +611,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc) * a destroyed buffer later. 
*/ for (i = 0; i < WINDOWS_NR; i++) - decon_win_disable(crtc, i); + decon_disable_plane(crtc, &ctx->planes[i]); clk_disable_unprepare(ctx->vclk); clk_disable_unprepare(ctx->eclk); @@ -616,14 +626,14 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .wait_for_vblank = decon_wait_for_vblank, - .win_commit = decon_win_commit, - .win_disable = decon_win_disable, - .clear_channels = decon_clear_channels, + .atomic_begin = decon_atomic_begin, + .update_plane = decon_update_plane, + .disable_plane = decon_disable_plane, + .atomic_flush = decon_atomic_flush, }; @@ -631,6 +641,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = (struct decon_context *)dev_id; u32 val, clear_bit; + int win; val = readl(ctx->regs + VIDINTCON1); @@ -643,8 +654,15 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) goto out; if (!ctx->i80_if) { - drm_handle_vblank(ctx->drm_dev, ctx->pipe); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + drm_crtc_handle_vblank(&ctx->crtc->base); + for (win = 0 ; win < WINDOWS_NR ; win++) { + struct exynos_drm_plane *plane = &ctx->planes[win]; + + if (!plane->pending_fb) + continue; + + exynos_drm_crtc_finish_update(ctx->crtc, plane); + } /* set wait vsync event to zero and wake up queue. */ if (atomic_read(&ctx->wait_vsync_event)) { @@ -675,7 +693,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data) type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], - 1 << ctx->pipe, type, zpos); + 1 << ctx->pipe, type, decon_formats, + ARRAY_SIZE(decon_formats), zpos); if (ret) return ret; } @@ -689,8 +708,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(ctx->crtc); } - if (ctx->display) - exynos_drm_create_enc_conn(drm_dev, ctx->display); + if (ctx->encoder) + exynos_dpi_bind(drm_dev, ctx->encoder); return 0; @@ -703,8 +722,8 @@ static void decon_unbind(struct device *dev, struct device *master, decon_disable(ctx->crtc); - if (ctx->display) - exynos_dpi_remove(ctx->display); + if (ctx->encoder) + exynos_dpi_remove(ctx->encoder); decon_ctx_remove(ctx); } @@ -789,9 +808,9 @@ static int decon_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ctx); - ctx->display = exynos_dpi_probe(dev); - if (IS_ERR(ctx->display)) { - ret = PTR_ERR(ctx->display); + ctx->encoder = exynos_dpi_probe(dev); + if (IS_ERR(ctx->encoder)) { + ret = PTR_ERR(ctx->encoder); goto err_iounmap; } diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 172b8002a..124fb9a56 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -32,19 +32,20 @@ #include <drm/drm_panel.h> #include "exynos_dp_core.h" +#include "exynos_drm_crtc.h" #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ connector) static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp) { - return to_exynos_crtc(dp->encoder->crtc); + return to_exynos_crtc(dp->encoder.crtc); } -static inline struct exynos_dp_device * -display_to_dp(struct exynos_drm_display *d) +static inline struct exynos_dp_device *encoder_to_dp( + struct drm_encoder *e) { - return container_of(d, struct
exynos_dp_device, display); + return container_of(e, struct exynos_dp_device, encoder); } struct bridge_init { @@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp) /* Configure video slave mode */ exynos_dp_enable_video_master(dp, 0); - /* Enable video */ - exynos_dp_start_video(dp); - timeout_loop = 0; for (;;) { @@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work) drm_helper_hpd_irq_event(dp->drm_dev); } -static void exynos_dp_commit(struct exynos_drm_display *display) +static void exynos_dp_commit(struct drm_encoder *encoder) { - struct exynos_dp_device *dp = display_to_dp(display); + struct exynos_dp_device *dp = encoder_to_dp(encoder); int ret; /* Keep the panel disabled while we configure video */ @@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display) if (drm_panel_enable(dp->panel)) DRM_ERROR("failed to enable the panel\n"); } + + /* Enable video */ + exynos_dp_start_video(dp); } static enum drm_connector_status exynos_dp_detect( @@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder( { struct exynos_dp_device *dp = ctx_from_connector(connector); - return dp->encoder; + return &dp->encoder; } static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { @@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, return 0; } -static int exynos_dp_create_connector(struct exynos_drm_display *display, - struct drm_encoder *encoder) +static int exynos_dp_create_connector(struct drm_encoder *encoder) { - struct exynos_dp_device *dp = display_to_dp(display); + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct drm_connector *connector = &dp->connector; int ret; - dp->encoder = encoder; - /* Pre-empt DP connector creation if there's a bridge */ if (dp->bridge) { ret = exynos_drm_attach_lcd_bridge(dp, encoder); @@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, return ret; } -static void exynos_dp_phy_init(struct exynos_dp_device *dp) +static bool exynos_dp_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - if (dp->phy) - phy_power_on(dp->phy); + return true; } -static void exynos_dp_phy_exit(struct exynos_dp_device *dp) +static void exynos_dp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - if (dp->phy) - phy_power_off(dp->phy); } -static void exynos_dp_poweron(struct exynos_dp_device *dp) +static void exynos_dp_enable(struct drm_encoder *encoder) { + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode == DRM_MODE_DPMS_ON) @@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp) crtc->ops->clock_enable(dp_to_crtc(dp), true); clk_prepare_enable(dp->clock); - exynos_dp_phy_init(dp); + phy_power_on(dp->phy); exynos_dp_init_dp(dp); enable_irq(dp->irq); - exynos_dp_commit(&dp->display); + exynos_dp_commit(&dp->encoder); + + dp->dpms_mode = DRM_MODE_DPMS_ON; } -static void exynos_dp_poweroff(struct exynos_dp_device *dp) +static void exynos_dp_disable(struct drm_encoder *encoder) { + struct exynos_dp_device *dp = encoder_to_dp(encoder); struct exynos_drm_crtc *crtc = dp_to_crtc(dp); if (dp->dpms_mode != DRM_MODE_DPMS_ON) @@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp) disable_irq(dp->irq); flush_work(&dp->hotplug_work); - 
exynos_dp_phy_exit(dp); + phy_power_off(dp->phy); clk_disable_unprepare(dp->clock); if (crtc->ops->clock_enable) @@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp) if (drm_panel_unprepare(dp->panel)) DRM_ERROR("failed to turnoff the panel\n"); } -} - -static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) -{ - struct exynos_dp_device *dp = display_to_dp(display); - switch (mode) { - case DRM_MODE_DPMS_ON: - exynos_dp_poweron(dp); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - exynos_dp_poweroff(dp); - break; - default: - break; - } - dp->dpms_mode = mode; + dp->dpms_mode = DRM_MODE_DPMS_OFF; } -static struct exynos_drm_display_ops exynos_dp_display_ops = { - .create_connector = exynos_dp_create_connector, - .dpms = exynos_dp_dpms, - .commit = exynos_dp_commit, +static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { + .mode_fixup = exynos_dp_mode_fixup, + .mode_set = exynos_dp_mode_set, + .enable = exynos_dp_enable, + .disable = exynos_dp_disable, +}; + +static struct drm_encoder_funcs exynos_dp_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) @@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) struct exynos_dp_device *dp = dev_get_drvdata(dev); struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm_dev = data; + struct drm_encoder *encoder = &dp->encoder; struct resource *res; unsigned int irq_flags; - int ret = 0; + int pipe, ret = 0; dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; @@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); - exynos_dp_phy_init(dp); + phy_power_on(dp->phy); exynos_dp_init_dp(dp); @@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) dp->drm_dev = drm_dev; - return exynos_drm_create_enc_conn(drm_dev, &dp->display); + pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + if (pipe < 0) + return pipe; + + encoder->possible_crtcs = 1 << pipe; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); + + ret = exynos_dp_create_connector(encoder); + if (ret) { + DRM_ERROR("failed to create connector ret = %d\n", ret); + drm_encoder_cleanup(encoder); + return ret; + } + + return 0; } static void exynos_dp_unbind(struct device *dev, struct device *master, @@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master, { struct exynos_dp_device *dp = dev_get_drvdata(dev); - exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); + exynos_dp_disable(&dp->encoder); } static const struct component_ops exynos_dp_ops = { @@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; - dp->display.type = EXYNOS_DISPLAY_TYPE_LCD; - dp->display.ops = &exynos_dp_display_ops; platform_set_drvdata(pdev, dp); panel_node = of_parse_phandle(dev->of_node, "panel", 0); @@ -1372,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - 
exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1406,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index a4e799679..e413b6f7b 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -147,11 +147,10 @@ struct link_train { }; struct exynos_dp_device { - struct exynos_drm_display display; + struct drm_encoder encoder; struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_encoder *encoder; struct drm_panel *panel; struct drm_bridge *bridge; struct clk *clock; diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c deleted file mode 100644 index 24994ba10..000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ /dev/null @@ -1,186 +0,0 @@ -/* exynos_drm_buf.c - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Author: Inki Dae - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/exynos_drm.h> - -#include "exynos_drm_drv.h" -#include "exynos_drm_gem.h" -#include "exynos_drm_buf.h" -#include "exynos_drm_iommu.h" - -static int lowlevel_buffer_allocate(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buf) -{ - int ret = 0; - enum dma_attr attr; - unsigned int nr_pages; - - if (buf->dma_addr) { - DRM_DEBUG_KMS("already allocated.\n"); - return 0; - } - - init_dma_attrs(&buf->dma_attrs); - - /* - * if EXYNOS_BO_CONTIG, fully physically contiguous memory - * region will be allocated else physically contiguous - * as possible. - */ - if (!(flags & EXYNOS_BO_NONCONTIG)) - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs); - - /* - * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping - * else cachable mapping.
- */ - if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE)) - attr = DMA_ATTR_WRITE_COMBINE; - else - attr = DMA_ATTR_NON_CONSISTENT; - - dma_set_attr(attr, &buf->dma_attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs); - - nr_pages = buf->size >> PAGE_SHIFT; - - if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - - buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); - if (!buf->pages) { - DRM_ERROR("failed to allocate pages.\n"); - return -ENOMEM; - } - - buf->cookie = dma_alloc_attrs(dev->dev, - buf->size, - &buf->dma_addr, GFP_KERNEL, - &buf->dma_attrs); - if (!buf->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); - ret = -ENOMEM; - goto err_free; - } - - start_addr = buf->dma_addr; - while (i < nr_pages) { - buf->pages[i] = phys_to_page(start_addr); - start_addr += PAGE_SIZE; - i++; - } - } else { - - buf->pages = dma_alloc_attrs(dev->dev, buf->size, - &buf->dma_addr, GFP_KERNEL, - &buf->dma_attrs); - if (!buf->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } - } - - buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); - if (IS_ERR(buf->sgt)) { - DRM_ERROR("failed to get sg table.\n"); - ret = PTR_ERR(buf->sgt); - goto err_free_attrs; - } - - DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", - (unsigned long)buf->dma_addr, - buf->size); - - return ret; - -err_free_attrs: - dma_free_attrs(dev->dev, buf->size, buf->pages, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - buf->dma_addr = (dma_addr_t)NULL; -err_free: - if (!is_drm_iommu_supported(dev)) - drm_free_large(buf->pages); - - return ret; -} - -static void lowlevel_buffer_deallocate(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buf) -{ - if (!buf->dma_addr) { - DRM_DEBUG_KMS("dma_addr is invalid.\n"); - return; - } - - DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", - (unsigned long)buf->dma_addr, - buf->size); - - sg_free_table(buf->sgt); - - kfree(buf->sgt); - buf->sgt = NULL; - - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, buf->size, buf->cookie, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - drm_free_large(buf->pages); - } else - dma_free_attrs(dev->dev, buf->size, buf->pages, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - - buf->dma_addr = (dma_addr_t)NULL; -} - -struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, - unsigned int size) -{ - struct exynos_drm_gem_buf *buffer; - - DRM_DEBUG_KMS("desired size = 0x%x\n", size); - - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) - return NULL; - - buffer->size = size; - return buffer; -} - -void exynos_drm_fini_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buffer) -{ - kfree(buffer); - buffer = NULL; -} - -int exynos_drm_alloc_buf(struct drm_device *dev, - struct exynos_drm_gem_buf *buf, unsigned int flags) -{ - - /* - * allocate memory region and set the memory information - * to vaddr and dma_addr of a buffer object. - */ - if (lowlevel_buffer_allocate(dev, flags, buf) < 0) - return -ENOMEM; - - return 0; -} - -void exynos_drm_free_buf(struct drm_device *dev, - unsigned int flags, struct exynos_drm_gem_buf *buffer) -{ - - lowlevel_buffer_deallocate(dev, flags, buffer); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h deleted file mode 100644 index a6412f196..000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ /dev/null @@ -1,33 +0,0 @@ -/* exynos_drm_buf.h - * - * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f196..000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buf,
-				unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
-				unsigned int flags,
-				struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972ea..7f55ba677 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
 #include
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-				struct exynos_drm_display *display)
-{
-	struct drm_encoder *encoder;
-	int ret;
-	unsigned long possible_crtcs = 0;
-
-	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
-	if (ret < 0)
-		return ret;
-
-	possible_crtcs |= 1 << ret;
-
-	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
-	if (!encoder) {
-		DRM_ERROR("failed to create encoder\n");
-		return -EFAULT;
-	}
-
-	display->encoder = encoder;
-
-	ret = display->ops->create_connector(display, encoder);
-	if (ret) {
-		DRM_ERROR("failed to create connector ret = %d\n", ret);
-		goto err_destroy_encoder;
-	}
-
-	return 0;
-
-err_destroy_encoder:
-	encoder->funcs->destroy(encoder);
-	return ret;
-}
-
 int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
 	if (!subdrv)
@@ -64,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
 
 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
@@ -75,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
 
 int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
@@ -105,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
 
 int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
@@ -123,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
 
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -147,7 +107,6 @@ err:
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
 
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 {
@@ -158,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 		subdrv->close(dev, subdrv->dev, file);
 	}
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
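/*
 * [Editor's note] The exynos_drm_core.c hunks above drop the
 * EXPORT_SYMBOL_GPL() markers, which suggests the subdrv registry is now
 * used only from within a single exynos DRM module and the symbols no
 * longer need exporting. The interface itself is unchanged; a minimal
 * registration sketch (my_subdrv and both callbacks are hypothetical,
 * their signatures inferred from the calls visible above):
 */
static int my_subdrv_open(struct drm_device *dev, struct device *subdrv_dev,
			  struct drm_file *file)
{
	return 0;	/* set up per-open state for this sub-device */
}

static void my_subdrv_close(struct drm_device *dev, struct device *subdrv_dev,
			    struct drm_file *file)
{
	/* tear down per-open state */
}

static struct exynos_drm_subdrv my_subdrv = {
	.open	= my_subdrv_open,
	.close	= my_subdrv_close,
};

/* at probe time: my_subdrv.dev = dev; exynos_drm_subdrv_register(&my_subdrv); */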
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b76e..ed28823d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,21 +19,15 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
 
 static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->enable)
 		exynos_crtc->ops->enable(exynos_crtc);
 
-	exynos_crtc->enabled = true;
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -41,34 +35,10 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (!exynos_crtc->enabled)
-		return;
-
-	/* wait for the completion of page flip. */
-	if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-				(exynos_crtc->event == NULL), HZ/20))
-		exynos_crtc->event = NULL;
-
 	drm_crtc_vblank_off(crtc);
 
 	if (exynos_crtc->ops->disable)
 		exynos_crtc->ops->disable(exynos_crtc);
-
-	exynos_crtc->enabled = false;
-}
-
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-			    const struct drm_display_mode *mode,
-			    struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-	if (exynos_crtc->ops->mode_fixup)
-		return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
-						    adjusted_mode);
-
-	return true;
 }
 
 static void
@@ -80,24 +50,41 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+
+	exynos_crtc->event = crtc->state->event;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
 
-	if (crtc->state->event) {
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		exynos_crtc->event = crtc->state->event;
+		if (exynos_crtc->ops->atomic_begin)
+			exynos_crtc->ops->atomic_begin(exynos_crtc,
+							exynos_plane);
 	}
 }
 
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_flush)
+			exynos_crtc->ops->atomic_flush(exynos_crtc,
+							exynos_plane);
+	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 	.enable		= exynos_drm_crtc_enable,
 	.disable	= exynos_drm_crtc_disable,
-	.mode_fixup	= exynos_drm_crtc_mode_fixup,
 	.mode_set_nofb	= exynos_drm_crtc_mode_set_nofb,
 	.atomic_begin	= exynos_crtc_atomic_begin,
 	.atomic_flush	= exynos_crtc_atomic_flush,
@@ -139,13 +126,13 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
 	if (!exynos_crtc)
 		return ERR_PTR(-ENOMEM);
 
-	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
 	exynos_crtc->pipe = pipe;
 	exynos_crtc->type = type;
 	exynos_crtc->ops = ops;
 	exynos_crtc->ctx = ctx;
 
+	init_waitqueue_head(&exynos_crtc->wait_update);
+
 	crtc = &exynos_crtc->base;
 
 	private->crtc[pipe] = crtc;
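/*
 * [Editor's note] The crtc_create() hunk above now initializes the
 * wait_update waitqueue, and the hunks below pair it with an atomic
 * pending_update counter in place of the old per-event
 * pending_flip_queue. The pairing follows the standard counter/waitqueue
 * pattern sketched here; the increment site lives elsewhere in this
 * series (exynos_drm_drv.c), so these helper names are illustrative:
 */
static void sketch_arm_update(struct exynos_drm_crtc *exynos_crtc)
{
	/* a committer arms one plane update before programming it */
	atomic_inc(&exynos_crtc->pending_update);
}

static void sketch_finish_update(struct exynos_drm_crtc *exynos_crtc)
{
	/* vblank path: the last completed update wakes the committer,
	 * who waits with a 50 ms bound (see wait_pending_update below) */
	if (atomic_dec_and_test(&exynos_crtc->pending_update))
		wake_up(&exynos_crtc->wait_update);
}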
@@ -171,11 +158,8 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return -EPERM;
-
 	if (exynos_crtc->ops->enable_vblank)
-		exynos_crtc->ops->enable_vblank(exynos_crtc);
+		return exynos_crtc->ops->enable_vblank(exynos_crtc);
 
 	return 0;
 }
@@ -186,31 +170,34 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->disable_vblank)
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
+{
+	wait_event_timeout(exynos_crtc->wait_update,
+			   (atomic_read(&exynos_crtc->pending_update) == 0),
+			   msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				   struct exynos_drm_plane *exynos_plane)
 {
-	struct exynos_drm_private *dev_priv = dev->dev_private;
-	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+	struct drm_crtc *crtc = &exynos_crtc->base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (exynos_crtc->event) {
+	exynos_plane->pending_fb = NULL;
 
-		drm_send_vblank_event(dev, -1, exynos_crtc->event);
-		drm_vblank_put(dev, pipe);
-		wake_up(&exynos_crtc->pending_flip_queue);
+	if (atomic_dec_and_test(&exynos_crtc->pending_update))
+		wake_up(&exynos_crtc->wait_update);
 
-	}
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (exynos_crtc->event)
+		drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
 
 	exynos_crtc->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -237,7 +224,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
 }
 
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type)
+					enum exynos_drm_output_type out_type)
 {
 	struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa7081..f87d4abda 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,14 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
 					void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				   struct exynos_drm_plane *exynos_plane);
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
 
 /* This function gets pipe value to crtc device matched with out_type. */
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type);
+					enum exynos_drm_output_type out_type);
 
 /*
  * This function calls the crtc device(manager)'s te_handler() callback
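/*
 * [Editor's note] The entire per-driver dma-buf implementation in
 * exynos_drm_dmabuf.c is deleted below; the attach/map plumbing it
 * provided is presumably superseded by the generic DRM PRIME helpers,
 * wired up outside this excerpt. For orientation, a driver typically
 * hooks those helpers like this (field values are illustrative, not
 * copied from this patch):
 */
static struct drm_driver sketch_driver = {
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	/* .gem_prime_get_sg_table / .gem_prime_import_sg_table stay
	 * driver-specific */
};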
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c091..000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include
-#include
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include
-
-struct exynos_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
-	return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
-					struct device *dev,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach;
-
-	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
-	if (!exynos_attach)
-		return -ENOMEM;
-
-	exynos_attach->dir = DMA_NONE;
-	attach->priv = exynos_attach;
-
-	return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!exynos_attach)
-		return;
-
-	sgt = &exynos_attach->sgt;
-
-	if (exynos_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				exynos_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(exynos_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *
-		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
-	struct drm_device *dev = gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buf;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int nents, ret;
-
-	/* just return current sgt if already requested. */
-	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
-		return &exynos_attach->sgt;
-
-	buf = gem_obj->buffer;
-	if (!buf) {
-		DRM_ERROR("buffer is null.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sgt = &exynos_attach->sgt;
-
-	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	rd = buf->sgt->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	exynos_attach->is_mapped = true;
-	exynos_attach->dir = dir;
-	attach->priv = exynos_attach;
-
-	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return sgt;
-}
-
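/*
 * [Editor's note] exynos_gem_map_dma_buf() above caches the mapped
 * sg_table in the per-attachment state (dir/is_mapped), so repeated maps
 * return the cached table, unmap_dma_buf() below can be a no-op, and the
 * table is only torn down in exynos_gem_detach_dma_buf(). For context,
 * the importer-side sequence that drives these callbacks looks roughly
 * like this (a generic sketch, not code from this patch):
 */
static int sketch_import_once(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);		/* -> .attach() */
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* -> .map_dma_buf(); a second call with the same direction
	 * would hit the is_mapped fast path above */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);	/* -> .detach(), frees the table */
	return 0;
}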
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-						struct sg_table *sgt,
-						enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num,
-						void *addr)
-{
-	/* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
-					unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
-					unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
-	struct vm_area_struct *vma)
-{
-	return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
-	.attach			= exynos_gem_attach_dma_buf,
-	.detach			= exynos_gem_detach_dma_buf,
-	.map_dma_buf		= exynos_gem_map_dma_buf,
-	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
-	.kmap			= exynos_gem_dmabuf_kmap,
-	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
-	.kunmap			= exynos_gem_dmabuf_kunmap,
-	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
-	.mmap			= exynos_gem_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &exynos_dmabuf_ops;
-	exp_info.size = exynos_gem_obj->base.size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return dma_buf_export(&exp_info);
-}
-
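/*
 * [Editor's note] The import path below decides buffer contiguity from
 * the mapped scatterlist: a single-entry table is certainly physically
 * contiguous, anything else is conservatively treated as non-contiguous
 * (the TODO in the deleted code notes that the exporter has no way to
 * communicate this today). As a tiny helper, the rule is (helper name
 * hypothetical):
 */
static unsigned int sketch_buf_flags_from_sgt(struct sg_table *sgt)
{
	/* one DMA segment => physically contiguous */
	return sgt->nents == 1 ? EXYNOS_BO_CONTIG : EXYNOS_BO_NONCONTIG;
}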
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-						struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	struct scatterlist *sgl;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer;
-	int ret;
-
-	/* is this one of own objects? */
-	if (dma_buf->ops == &exynos_dmabuf_ops) {
-		struct drm_gem_object *obj;
-
-		obj = dma_buf->priv;
-
-		/* is it from our device? */
-		if (obj->dev == drm_dev) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_reference(obj);
-			return obj;
-		}
-	}
-
-	attach = dma_buf_attach(dma_buf, drm_dev->dev);
-	if (IS_ERR(attach))
-		return ERR_PTR(-EINVAL);
-
-	get_dma_buf(dma_buf);
-
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		goto err_buf_detach;
-	}
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		ret = -ENOMEM;
-		goto err_unmap_attach;
-	}
-
-	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
-	sgl = sgt->sgl;
-
-	buffer->size = dma_buf->size;
-	buffer->dma_addr = sg_dma_address(sgl);
-
-	if (sgt->nents == 1) {
-		/* always physically continuous memory if sgt->nents is 1. */
-		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
-	} else {
-		/*
-		 * this case could be CONTIG or NONCONTIG type but for now
-		 * sets NONCONTIG.
-		 * TODO. we have to find a way that exporter can notify
-		 * the type of its own buffer to importer.
-		 */
-		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
-	}
-
-	exynos_gem_obj->buffer = buffer;
-	buffer->sgt = sgt;
-	exynos_gem_obj->base.import_attach = attach;
-
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
-			buffer->size);
-
-	return &exynos_gem_obj->base;
-
-err_free_buffer:
-	kfree(buffer);
-	buffer = NULL;
-err_unmap_attach:
-	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9ff4..000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-						struct dma_buf *dma_buf);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595c1..c748b8790 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
 #include